# SyNet-master/tensorpack/tensorpack/train/trainers.py
# File: trainers.py
import multiprocessing as mp
import os
import sys
import tensorflow as tf
from ..callbacks import CallbackFactory, RunOp
from ..graph_builder.distributed import DistributedParameterServerBuilder, DistributedReplicatedBuilder
from ..graph_builder.training import (
AsyncMultiGPUBuilder, SyncMultiGPUParameterServerBuilder, SyncMultiGPUReplicatedBuilder)
from ..graph_builder.utils import override_to_local_variable
from ..input_source import FeedfreeInput, QueueInput
from ..tfutils import get_global_step_var
from ..tfutils.distributed import get_distributed_session_creator
from ..tfutils.sesscreate import NewSessionCreator
from ..tfutils.tower import TrainTowerContext
from ..utils import logger
from ..utils.argtools import map_arg
from ..utils.develop import HIDE_DOC, deprecated
from .tower import SingleCostTrainer
__all__ = ['NoOpTrainer', 'SimpleTrainer',
'QueueInputTrainer',
'SyncMultiGPUTrainer',
'SyncMultiGPUTrainerReplicated',
'SyncMultiGPUTrainerParameterServer',
'AsyncMultiGPUTrainer',
'DistributedTrainerParameterServer',
'DistributedTrainerReplicated',
'HorovodTrainer', 'BytePSTrainer']
def _int_to_range(x):
if isinstance(x, int):
assert x > 0, "Argument cannot be {}!".format(x)
return list(range(x))
return x
class SimpleTrainer(SingleCostTrainer):
"""
Single-GPU single-cost single-tower trainer.
"""
def _setup_graph(self, input, get_cost_fn, get_opt_fn):
logger.info("Building graph for a single training tower ...")
with TrainTowerContext(''):
grads = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)()
opt = get_opt_fn()
self.train_op = opt.apply_gradients(grads, name='train_op')
return []
class NoOpTrainer(SimpleTrainer):
"""
A special trainer that builds the graph (if given a tower function)
and does nothing in each step.
It is used to only run the callbacks.
Note that `steps_per_epoch` and `max_epochs` are still valid options.
"""
def run_step(self):
self.hooked_sess.run([])
# Only exists for type check & back-compatibility
class QueueInputTrainer(SimpleTrainer):
@deprecated("SimpleTrainer is sufficient!", "2019-12-31")
def _setup_graph(self, input, get_cost_fn, get_opt_fn):
assert isinstance(input, QueueInput), input
return super(QueueInputTrainer, self)._setup_graph(input, get_cost_fn, get_opt_fn)
class SyncMultiGPUTrainerParameterServer(SingleCostTrainer):
__doc__ = SyncMultiGPUParameterServerBuilder.__doc__ + """
Attributes:
devices (list[int]): List of GPU ids.
"""
@map_arg(gpus=_int_to_range)
def __init__(self, gpus, ps_device=None):
"""
Args:
gpus ([int]): list of GPU ids.
ps_device: either 'gpu' or 'cpu', where variables are stored.
The default value is subject to change.
"""
self.devices = gpus
if ps_device is None:
ps_device = 'gpu' if len(gpus) <= 2 else 'cpu'
self._builder = SyncMultiGPUParameterServerBuilder(gpus, ps_device)
super(SyncMultiGPUTrainerParameterServer, self).__init__()
def _setup_graph(self, input, get_cost_fn, get_opt_fn):
if len(self.devices) > 1:
assert isinstance(input, FeedfreeInput), input
tower_fn = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)
grad_list = self._builder.call_for_each_tower(tower_fn)
self.train_op = self._builder.build(grad_list, get_opt_fn)
return []
def SyncMultiGPUTrainer(gpus):
"""
Return a default multi-GPU trainer, if you don't care about the details.
It may not be the most efficient one for your task.
Args:
gpus (list[int]): list of GPU ids.
"""
return SyncMultiGPUTrainerParameterServer(gpus, ps_device='cpu')
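
# A small sketch of how these trainers are typically constructed, assuming
# four visible GPUs. Because of the ``@map_arg(gpus=_int_to_range)`` decorator,
# an integer GPU count and an explicit list of GPU ids are interchangeable:
def _demo_trainer_construction():
    trainer = SyncMultiGPUTrainer(4)  # equivalent to gpus=[0, 1, 2, 3]
    assert trainer.devices == [0, 1, 2, 3]
    # Replicated mode with summed (instead of averaged) gradients:
    trainer = SyncMultiGPUTrainerReplicated([0, 1, 2, 3], average=False)
    return trainer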
class AsyncMultiGPUTrainer(SingleCostTrainer):
__doc__ = AsyncMultiGPUBuilder.__doc__ + """
Attributes:
devices (list[int]): List of GPU ids.
"""
@map_arg(gpus=_int_to_range)
def __init__(self, gpus, scale_gradient=True):
"""
Args:
gpus ([int]): list of GPU ids.
scale_gradient (bool): if True, will scale each gradient by ``1.0/nr_gpu``.
"""
self.devices = gpus
self._builder = AsyncMultiGPUBuilder(gpus, scale_gradient)
super(AsyncMultiGPUTrainer, self).__init__()
def _setup_graph(self, input, get_cost_fn, get_opt_fn):
if len(self.devices) > 1:
assert isinstance(input, FeedfreeInput), input
tower_fn = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)
grad_list = self._builder.call_for_each_tower(tower_fn)
self.train_op = self._builder.build(grad_list, get_opt_fn)
return []
class SyncMultiGPUTrainerReplicated(SingleCostTrainer):
__doc__ = SyncMultiGPUReplicatedBuilder.__doc__ + """
Attributes:
devices (list[int]): List of GPU ids.
BROADCAST_EVERY_EPOCH (bool):
Whether to broadcast the variables every epoch.
Theoretically this is a no-op (because the variables
are supposed to be in-sync).
But this cheap operation may help prevent
certain numerical issues in practice.
Note that in cases such as BatchNorm, the variables may not be in sync.
"""
@map_arg(gpus=_int_to_range)
def __init__(self, gpus, average=True, mode=None):
"""
Args:
gpus (int or [int]): list of GPU ids.
average (bool): whether to average or sum gradients.
mode (str or None): Gradient aggregation mode.
Supported values: ['nccl', 'hierarchical', 'cpu'].
Default to pick automatically by heuristics.
These modes may have slight (within 5%) differences in speed.
"hierarchical" mode was designed for DGX-like 8GPU machines.
"""
self.devices = gpus
if mode is None:
mode = 'hierarchical' if len(gpus) == 8 else 'nccl'
mode = mode.lower()
self._builder = SyncMultiGPUReplicatedBuilder(gpus, average, mode)
self.BROADCAST_EVERY_EPOCH = True
super(SyncMultiGPUTrainerReplicated, self).__init__()
def _setup_graph(self, input, get_cost_fn, get_opt_fn):
if len(self.devices) > 1:
assert isinstance(input, FeedfreeInput), input
tower_fn = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)
grad_list = self._builder.call_for_each_tower(tower_fn)
self.train_op, post_init_op = self._builder.build(grad_list, get_opt_fn)
if post_init_op is not None:
cb = RunOp(
post_init_op,
run_before=True,
run_as_trigger=self.BROADCAST_EVERY_EPOCH,
verbose=True)
cb.name_scope = "SyncVariables"
return [cb]
else:
return []
class DistributedTrainerBase(SingleCostTrainer):
devices = None
def __init__(self, gpus, server):
super(DistributedTrainerBase, self).__init__()
self.devices = gpus
self.server = server
self.job_name = server.server_def.job_name
logger.info("Distributed training on cluster:\n" + str(server.server_def.cluster))
def join(self):
logger.info("Calling server.join() on {}:{}".format(self.job_name, self.server.server_def.task_index))
logger.info("Kill me with 'kill {}'".format(os.getpid()))
self.server.join() # this function will never return tensorflow#4713
raise RuntimeError("This is a bug. Server.join() for should never return!")
@HIDE_DOC
def initialize(self, session_creator, session_init):
if not isinstance(session_creator, NewSessionCreator) or \
session_creator.user_provided_config:
raise ValueError(
"You are not allowed to set session_creator or session_config for distributed training! "
"To use a custom session config, pass it to tf.train.Server.")
super(DistributedTrainerBase, self).initialize(
get_distributed_session_creator(self.server), session_init)
# This is slow. Deprecated in favor of horovod.
class DistributedTrainerParameterServer(DistributedTrainerBase):
__doc__ = DistributedParameterServerBuilder.__doc__
@map_arg(gpus=_int_to_range)
def __init__(self, gpus, server, caching_device='cpu'):
"""
Args:
gpus ([int]): list of GPU ids.
server (tf.train.Server): the server with ps and workers.
caching_device (str): either 'cpu' or 'gpu'. The device to cache variables copied from PS
"""
super(DistributedTrainerParameterServer, self).__init__(gpus, server)
assert self.job_name in ['ps', 'worker'], self.job_name
if self.job_name == 'ps':
self.join()
self._builder = DistributedParameterServerBuilder(gpus, server, caching_device)
self.is_chief = self._builder.is_chief
def _setup_graph(self, input, get_cost_fn, get_opt_fn):
assert isinstance(input, FeedfreeInput), input
self.train_op = self._builder.build(
self._make_get_grad_fn(input, get_cost_fn, get_opt_fn), get_opt_fn)
return []
# This is slow. Deprecated in favor of horovod.
class DistributedTrainerReplicated(DistributedTrainerBase):
__doc__ = DistributedReplicatedBuilder.__doc__
@map_arg(gpus=_int_to_range)
def __init__(self, gpus, server):
"""
Args:
gpus (list[int]): list of GPU ids.
server (tf.train.Server): the server with ps and workers.
"""
super(DistributedTrainerReplicated, self).__init__(gpus, server)
assert self.job_name in ['ps', 'worker'], self.job_name
if self.job_name == 'ps':
self.join()
self._builder = DistributedReplicatedBuilder(gpus, server)
self.is_chief = self._builder.is_chief
def _setup_input(self, input_signature, input):
with override_to_local_variable():
get_global_step_var() # gs should be local
# input source may create variables (queue size summary)
# TODO This is not good because we don't know from here
# whether something should be global or local. We now assume
# they should be local.
assert not input.setup_done()
return input.setup(input_signature)
def _setup_graph(self, input, get_cost_fn, get_opt_fn):
assert isinstance(input, FeedfreeInput), input
self.train_op, initial_sync_op, model_sync_op = self._builder.build(
self._make_get_grad_fn(input, get_cost_fn, get_opt_fn), get_opt_fn)
callbacks = []
# Initial syncing vars from PS
cb = RunOp(lambda: initial_sync_op,
run_before=True, run_as_trigger=False, verbose=True)
cb.chief_only = False
callbacks.append(cb)
# Sync model_variables to PS, only chief needs to do this
if model_sync_op:
cb = RunOp(lambda: model_sync_op,
run_before=False, run_as_trigger=True, verbose=True)
logger.warn("For efficiency, local MODEL_VARIABLES are only synced to PS once "
"every epoch. Be careful if you save the model more frequently than this.")
callbacks.append(cb)
return callbacks
@property
def _main_tower_vs_name(self):
return "tower0"
class HorovodTrainer(SingleCostTrainer):
"""
Horovod trainer, support both multi-GPU and distributed training.
To use for multi-GPU training:
.. code-block:: bash
# First, change trainer to HorovodTrainer(), then
CUDA_VISIBLE_DEVICES=0,1,2,3 NCCL_DEBUG=INFO mpirun -np 4 --output-filename mylog python train.py
To use for distributed training:
.. code-block:: bash
# First, change trainer to HorovodTrainer(), then
mpirun -np 8 -H server1:4,server2:4 \\
-bind-to none -map-by slot \\
--output-filename mylog -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH \\
python train.py
# Add other environment variables you need by -x, e.g. PYTHONPATH, PATH.
# If using all GPUs, you can always skip the `CUDA_VISIBLE_DEVICES` option.
        # There are other MPI options that can potentially improve performance, especially on specialized hardware.
Horovod can also be launched without MPI. See
`its documentation <https://github.com/horovod/horovod#running-horovod>`_
for more details.
Note:
1. To reach the maximum speed in your system, there are many options to tune
for Horovod installation and in the MPI command line.
See Horovod docs for details.
2. Due to a TF bug (#8136), you must not initialize CUDA context before the trainer starts training.
Therefore TF functions like `is_gpu_available()` or `list_local_devices()`
must be avoided.
You can, however, use `tf.config.experimental.list_physical_devices('GPU')`, introduced in TF 1.14.
3. Horovod supports both MPI and gloo. There are a few drawbacks of the MPI backend:
+ MPI does not like `fork()`. If your code (e.g. dataflow) contains multiprocessing, it may cause problems.
+ MPI sometimes fails to kill all processes in the end. Be sure to check it afterwards.
4. Keep in mind that there is one process running the script per GPU, therefore:
+ Make sure your InputSource has reasonable randomness.
      + If your data processing is heavy, doing it in a single dedicated process might be
        a better choice than repeating it in each process.
+ You need to make sure log directories in each process won't conflict.
You can set it only for the chief process, or set a different one for each process.
+ Callbacks have an option to be run only in the chief process, or in all processes.
See :meth:`Callback.set_chief_only()`. Most callbacks have a reasonable
default already, but certain callbacks may need your customization.
Report an issue if you find any bad defaults.
      + You can use Horovod API such as `hvd.rank()` to know which process you are, and choose
        a different code path accordingly. The chief process has rank 0.
5. Due to these caveats, see
`ResNet-Horovod <https://github.com/tensorpack/benchmarks/tree/master/ResNet-Horovod>`_
for a full example which has handled these common issues.
This example can train ImageNet in roughly an hour following the paper's setup.
Attributes:
BROADCAST_EVERY_EPOCH (bool):
Whether to broadcast the variables every epoch.
Theoretically this is a no-op (because the variables
are supposed to be in-sync).
But this cheap operation may help prevent certain numerical issues in practice.
Note that in cases such as BatchNorm, the variables may not be in sync.
"""
def __init__(self, average=True, compression=None):
"""
Args:
average (bool): whether to average or sum the gradients across processes.
compression: `hvd.Compression.fp16` or `hvd.Compression.none`
"""
if 'pyarrow' in sys.modules:
logger.warn("Horovod and pyarrow may conflict due to pyarrow bugs.")
# lazy import
import horovod.tensorflow as hvd
import horovod
hvd_version = tuple(map(int, horovod.__version__.split('.')[:3]))
self.hvd = hvd
hvd.init()
self.is_chief = hvd.rank() == 0
self._local_rank = hvd.local_rank()
self._rank = hvd.rank()
self._average = average
self._compression = compression
self._has_compression = hvd_version >= (0, 15, 0)
logger.info("[HorovodTrainer] local rank={}".format(self._local_rank))
super(HorovodTrainer, self).__init__()
self.BROADCAST_EVERY_EPOCH = True
def mpi_enabled(self):
"""
Returns:
bool: whether hvd is currently running under MPI
"""
try:
return self.hvd.mpi_enabled()
except AttributeError:
return False
def allreduce(self, grads):
if self.hvd.size() == 1:
return grads
averaged_gradients = []
with tf.name_scope("AllReduce"):
for grad, var in grads:
if grad is not None:
if self._compression is not None and self._has_compression:
avg_grad = self.hvd.allreduce(grad, average=self._average, compression=self._compression)
else:
avg_grad = self.hvd.allreduce(grad, average=self._average)
averaged_gradients.append((avg_grad, var))
else:
averaged_gradients.append((None, var))
return averaged_gradients
def _setup_graph(self, input, get_cost_fn, get_opt_fn):
with TrainTowerContext(''):
grads = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)()
grads = self.allreduce(grads)
opt = get_opt_fn()
self.train_op = opt.apply_gradients(grads, name='train_op')
cb = CallbackFactory(
before_train=self.broadcast,
trigger=self.broadcast if self.BROADCAST_EVERY_EPOCH else None
).set_chief_only(False)
return [cb]
def broadcast(self, _):
logger.info("Running broadcast ...")
# the op will be created in initialize()
self.sess.run(self._broadcast_op)
@HIDE_DOC
def initialize(self, session_creator, session_init):
# broadcast_op should be the last setup_graph: it needs to be created
# "right before" the graph is finalized,
# because it needs to capture all the variables (which may be created by callbacks).
self._broadcast_op = self.hvd.broadcast_global_variables(0)
# it's important that our NewSessionCreator does not finalize the graph
if not isinstance(session_creator, NewSessionCreator):
raise ValueError(
"session_creator has to be `NewSessionCreator` for horovod/byteps training! ")
# NOTE It will fail if GPU was already detected before initializing the session
session_creator.config.gpu_options.visible_device_list = str(self._local_rank)
try:
session_creator.config.inter_op_parallelism_threads = mp.cpu_count() // self.hvd.local_size()
except AttributeError: # old horovod does not have local_size
pass
super(HorovodTrainer, self).initialize(session_creator, session_init)
        # This broadcast belongs to the "initialize" stage.
# It should not be delayed to the "before_train" stage.
# TODO:
        # 1. an allgather helper to concat strings
# 2. check variables on each rank match each other, print warnings, and broadcast the common set.
if self.is_chief:
logger.info("Broadcasting initialized variables ...")
else:
logger.info("Rank {} waiting for initialization broadcasting ...".format(self._rank))
self.sess.run(self._broadcast_op)
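
# A minimal sketch of a training script driven by HorovodTrainer, assuming it
# is launched as ``mpirun -np 4 python train.py`` and that ``MyModel`` and
# ``my_dataflow`` are user-provided placeholders (hypothetical names).
# Following note 4 above, only the chief (rank 0) sets up the log directory:
#
#   trainer = HorovodTrainer(average=True)
#   if trainer.is_chief:
#       logger.set_logger_dir('train_log/horovod_run')
#   launch_train_with_config(
#       TrainConfig(model=MyModel(), dataflow=my_dataflow),
#       trainer)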
class BytePSTrainer(HorovodTrainer):
"""
BytePS trainer. Supports both multi-GPU and distributed training.
    It achieves better scalability than horovod in distributed training, if the model is
    communication-intensive and you have properly set up the machines following its
    `best practices <https://github.com/bytedance/byteps/blob/master/docs/best-practice.md>`_,
    which require a few extra bandwidth servers compared to horovod.
To use it, switch the trainer, and refer to BytePS documentation on how to
launch server/scheduler/workers.
Attributes:
hvd (module): the byteps module that contains horovod-compatible APIs
like `rank(),size()`.
This attribute exists so that downstream code that uses these APIs
does not need to worry about which library is being used under the hood.
"""
def __init__(self, average=True):
"""
Args:
average (bool): whether to average or sum the gradients across processes.
"""
import byteps.tensorflow as bps
self.hvd = bps # BytePS has the same interface as Horovod
self.hvd.allreduce = bps.push_pull # https://github.com/bytedance/byteps/issues/8
assert os.environ.get("DMLC_ROLE", None) == "worker"
assert "DMLC_WORKER_ID" in os.environ and "DMLC_NUM_WORKER" in os.environ
bps.init()
self.is_chief = bps.rank() == 0
self._local_rank = bps.local_rank()
self._rank = bps.rank()
self._average = average
self._compression = None
self._has_compression = False
logger.info("[BytePSTrainer] local rank={}".format(self._local_rank))
SingleCostTrainer.__init__(self)
def mpi_enabled(self):
"""
Returns:
bool: whether hvd is currently running under MPI
"""
return False

# SyNet-master/tensorpack/tensorpack/train/interface.py
# File: interface.py
from ..compat import tfv1
from ..input_source import FeedInput, InputSource, QueueInput, StagingInput
from ..utils import logger
from ..compat import is_tfv2
from .config import TrainConfig
from .tower import SingleCostTrainer
from .trainers import SimpleTrainer
__all__ = ['launch_train_with_config']
def apply_default_prefetch(input_source_or_dataflow, trainer):
"""
Apply a set of default rules to make a fast :class:`InputSource`.
Args:
input_source_or_dataflow(InputSource | DataFlow):
trainer (Trainer):
Returns:
InputSource
"""
if not isinstance(input_source_or_dataflow, InputSource):
# to mimic same behavior of the old trainer interface
if type(trainer) == SimpleTrainer:
input = FeedInput(input_source_or_dataflow)
else:
logger.info("Automatically applying QueueInput on the DataFlow.")
input = QueueInput(input_source_or_dataflow)
else:
input = input_source_or_dataflow
if hasattr(trainer, 'devices'):
towers = trainer.devices
if len(towers) > 1: # seem to only help on >1 GPUs
assert not isinstance(trainer, SimpleTrainer)
if isinstance(input, QueueInput):
logger.info("Automatically applying StagingInput on the DataFlow.")
input = StagingInput(input)
return input
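
# A quick sketch of what the rules above yield, assuming ``df`` is some
# DataFlow (hypothetical). A SimpleTrainer gets a plain FeedInput, while a
# multi-GPU trainer gets a QueueInput that is then wrapped in a StagingInput:
#
#   apply_default_prefetch(df, SimpleTrainer())         # -> FeedInput(df)
#   apply_default_prefetch(df, some_multi_gpu_trainer)  # -> StagingInput(QueueInput(df))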
def launch_train_with_config(config, trainer):
"""
Train with a :class:`TrainConfig` and a :class:`Trainer`, to
present the simple and old training interface. It basically does the following
3 things (and you can easily do them by yourself if you need more control):
1. Setup the input with automatic prefetching heuristics,
from `config.data` or `config.dataflow`.
2. Call `trainer.setup_graph` with the input as well as `config.model`.
3. Call `trainer.train` with rest of the attributes of config.
See the `related tutorial
<https://tensorpack.readthedocs.io/tutorial/training-interface.html#with-modeldesc-and-trainconfig>`_
to learn more.
Args:
config (TrainConfig):
trainer (Trainer): an instance of :class:`SingleCostTrainer`.
Example:
.. code-block:: python
launch_train_with_config(
config, SyncMultiGPUTrainerParameterServer(8, ps_device='gpu'))
"""
if is_tfv2():
tfv1.disable_eager_execution()
assert isinstance(trainer, SingleCostTrainer), trainer
assert isinstance(config, TrainConfig), config
assert config.model is not None
assert config.dataflow is not None or config.data is not None
model = config.model
input = config.data or config.dataflow
input = apply_default_prefetch(input, trainer)
# This is the only place where the `ModelDesc` abstraction is useful.
    # We should gradually move away from this unnecessary abstraction.
# TowerFunc is a better abstraction (similar to tf.function in the future)
trainer.setup_graph(
model.get_input_signature(), input,
model.build_graph, model.get_optimizer)
_check_unused_regularization()
trainer.train_with_defaults(
callbacks=config.callbacks,
monitors=config.monitors,
session_creator=config.session_creator,
session_init=config.session_init,
steps_per_epoch=config.steps_per_epoch,
starting_epoch=config.starting_epoch,
max_epoch=config.max_epoch,
extra_callbacks=config.extra_callbacks)
def _check_unused_regularization():
coll = tfv1.get_collection(tfv1.GraphKeys.REGULARIZATION_LOSSES)
unconsumed_reg = []
for c in coll:
if len(c.consumers()) == 0:
unconsumed_reg.append(c)
if unconsumed_reg:
logger.warn("The following tensors appear in REGULARIZATION_LOSSES collection but have no "
"consumers! You may have forgotten to add regularization to total cost.")
logger.warn("Unconsumed regularization: {}".format(', '.join([x.name for x in unconsumed_reg])))

# SyNet-master/tensorpack/tensorpack/train/model_desc.py
# File: model_desc.py
import tensorflow as tf
from ..utils.argtools import memoized_method
from ..tfutils.common import get_op_tensor_name
from ..tfutils.tower import get_current_tower_context
from ..compat import backport_tensor_spec, tfv1
TensorSpec = backport_tensor_spec()
__all__ = ['ModelDesc', 'ModelDescBase']
class ModelDescBase(object):
"""
Base class for a model description.
It is used for the simple training interface described in
`Training Interface Tutorial <https://tensorpack.readthedocs.io/tutorial/training-interface.html>`_.
Subclass is expected to implement :meth:`inputs` and :meth:`build_graph`, as they
together define a tower function.
"""
@memoized_method
def get_input_signature(self):
"""
Returns:
A list of :class:`tf.TensorSpec`, which describes the inputs of this model.
The result is cached for each instance of :class:`ModelDescBase`.
"""
with tf.Graph().as_default() as G: # create these placeholder in a temporary graph
inputs = self.inputs()
assert isinstance(inputs, (list, tuple)), \
"ModelDesc.inputs() should return a list of tf.TensorSpec objects! Got {} instead.".format(str(inputs))
if isinstance(inputs[0], tf.Tensor):
for p in inputs:
assert "Placeholder" in p.op.type, \
"inputs() have to return TensorSpec or placeholders! Found {} instead.".format(p)
assert p.graph == G, "Placeholders returned by inputs() should be created inside inputs()!"
return [TensorSpec(shape=p.shape, dtype=p.dtype, name=get_op_tensor_name(p.name)[0]) for p in inputs]
@property
def input_names(self):
"""
list[str]: the names of all the inputs.
"""
return [k.name for k in self.get_input_signature()]
def inputs(self):
"""
A subclass is expected to implement this method.
If returning placeholders,
the placeholders **have to** be created inside this method.
Don't return placeholders created in other places.
        Also, users should never call this method themselves.
Returns:
list[tf.TensorSpec or tf.placeholder].
"""
raise NotImplementedError()
def build_graph(self, *args):
"""
A subclass is expected to implement this method.
Build the whole symbolic graph.
This is supposed to be part of the "tower function" when used with :class:`TowerTrainer`.
Args:
args ([tf.Tensor]): tensors that matches the list of inputs defined by ``inputs()``.
Returns:
In general it returns nothing, but a subclass
may require it to return necessary information to build the trainer.
For example, `SingleCostTrainer` expect this method to return the cost tensor.
"""
raise NotImplementedError()
@property
def training(self):
"""
bool: whether the caller is under a training context or not.
"""
return get_current_tower_context().is_training
class ModelDesc(ModelDescBase):
"""
    One subclass of :class:`ModelDescBase` with the assumption of
**single cost** and **single optimizer** training.
It has the following constraints in addition to :class:`ModelDescBase`:
1. `build_graph(...)` method should return a cost tensor when called under a training context.
The cost will be the final cost to be optimized by the optimizer.
Therefore it should include necessary regularization.
2. Subclass is expected to implement :meth:`optimizer()` method.
"""
@memoized_method
def get_optimizer(self):
"""
Return the memoized optimizer returned by `optimizer()`.
        Users of :class:`ModelDesc` will need to implement `optimizer()`,
        which will be called only once per model instance.
Returns:
a :class:`tf.train.Optimizer` instance.
"""
ret = self.optimizer()
assert isinstance(ret, tfv1.train.Optimizer), \
"ModelDesc.optimizer() must return a tf.train.Optimizer! Got {} instead.".format(str(ret))
return ret
def optimizer(self):
"""
A subclass is expected to implement this method.
Returns:
a `tf.train.Optimizer` instance.
"""
raise NotImplementedError()
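
# A minimal concrete ModelDesc, as a sketch only: a toy linear regression
# with a fixed learning rate. ``inputs`` declares the signature,
# ``build_graph`` returns the total cost, and ``optimizer`` returns the
# optimizer instance:
class _DemoRegressionModel(ModelDesc):
    def inputs(self):
        return [TensorSpec((None, 4), tf.float32, 'x'),
                TensorSpec((None,), tf.float32, 'y')]

    def build_graph(self, x, y):
        pred = tf.reduce_sum(x, axis=1)
        return tf.reduce_mean(tf.square(pred - y), name='cost')

    def optimizer(self):
        return tfv1.train.GradientDescentOptimizer(0.1)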

# SyNet-master/tensorpack/tensorpack/utils/concurrency.py
# File: concurrency.py
# Some code taken from zxytim
import sys
import atexit
import bisect
import multiprocessing as mp
import platform
import signal
import threading
import weakref
from contextlib import contextmanager
import six
from six.moves import queue
import subprocess
from . import logger
from .argtools import log_once
__all__ = ['StoppableThread', 'LoopThread', 'ShareSessionThread',
'ensure_proc_terminate',
'start_proc_mask_signal']
class StoppableThread(threading.Thread):
"""
A thread that has a 'stop' event.
"""
def __init__(self, evt=None):
"""
Args:
evt(threading.Event): if None, will create one.
"""
super(StoppableThread, self).__init__()
if evt is None:
evt = threading.Event()
self._stop_evt = evt
def stop(self):
""" Stop the thread"""
self._stop_evt.set()
def stopped(self):
"""
Returns:
bool: whether the thread is stopped or not
"""
        return self._stop_evt.is_set()
def queue_put_stoppable(self, q, obj):
""" Put obj to queue, but will give up when the thread is stopped"""
while not self.stopped():
try:
q.put(obj, timeout=5)
break
except queue.Full:
pass
def queue_get_stoppable(self, q):
""" Take obj from queue, but will give up when the thread is stopped"""
while not self.stopped():
try:
return q.get(timeout=5)
except queue.Empty:
pass
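
# A minimal sketch of the intended use: a producer that fills a queue until
# ``stop()`` is called. ``queue_put_stoppable`` keeps retrying with a timeout,
# so the thread exits promptly once the stop event is set:
class _DemoProducer(StoppableThread):
    def __init__(self, q):
        super(_DemoProducer, self).__init__()
        self._q = q

    def run(self):
        while not self.stopped():
            self.queue_put_stoppable(self._q, 'work-item')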
class LoopThread(StoppableThread):
""" A pausable thread that simply runs a loop"""
def __init__(self, func, pausable=True):
"""
Args:
func: the function to run
"""
super(LoopThread, self).__init__()
self._func = func
self._pausable = pausable
if pausable:
self._lock = threading.Lock()
self.daemon = True
def run(self):
while not self.stopped():
if self._pausable:
self._lock.acquire()
self._lock.release()
self._func()
def pause(self):
""" Pause the loop """
assert self._pausable
self._lock.acquire()
def resume(self):
""" Resume the loop """
assert self._pausable
self._lock.release()
class ShareSessionThread(threading.Thread):
""" A wrapper around thread so that the thread
uses the default session at "start()" time.
"""
def __init__(self, th=None):
"""
Args:
th (threading.Thread or None):
"""
super(ShareSessionThread, self).__init__()
if th is not None:
assert isinstance(th, threading.Thread), th
self._th = th
self.name = th.name
self.daemon = th.daemon
@contextmanager
def default_sess(self):
if self._sess:
with self._sess.as_default():
yield self._sess
else:
logger.warn("ShareSessionThread {} wasn't under a default session!".format(self.name))
yield None
def start(self):
from ..compat import tfv1
self._sess = tfv1.get_default_session()
super(ShareSessionThread, self).start()
def run(self):
if not self._th:
raise NotImplementedError()
with self._sess.as_default():
self._th.run()
class DIE(object):
""" A placeholder class indicating end of queue """
pass
def ensure_proc_terminate(proc):
"""
Make sure processes terminate when main process exit.
Args:
proc (multiprocessing.Process or list)
"""
if isinstance(proc, list):
for p in proc:
ensure_proc_terminate(p)
return
def stop_proc_by_weak_ref(ref):
proc = ref()
if proc is None:
return
if not proc.is_alive():
return
proc.terminate()
proc.join()
assert isinstance(proc, mp.Process)
atexit.register(stop_proc_by_weak_ref, weakref.ref(proc))
def enable_death_signal(_warn=True):
"""
Set the "death signal" of the current process, so that
the current process will be cleaned with guarantee
in case the parent dies accidentally.
"""
if platform.system() != 'Linux':
return
try:
import prctl # pip install python-prctl
except ImportError:
if _warn:
log_once('"import prctl" failed! Install python-prctl so that processes can be cleaned with guarantee.',
'warn')
return
else:
assert hasattr(prctl, 'set_pdeathsig'), \
"prctl.set_pdeathsig does not exist! Note that you need to install 'python-prctl' instead of 'prctl'."
# is SIGHUP a good choice?
prctl.set_pdeathsig(signal.SIGHUP)
def is_main_thread():
if six.PY2:
return isinstance(threading.current_thread(), threading._MainThread)
else:
# a nicer solution with py3
return threading.current_thread() == threading.main_thread()
@contextmanager
def mask_sigint():
"""
Returns:
If called in main thread, returns a context where ``SIGINT`` is ignored, and yield True.
Otherwise yield False.
"""
if is_main_thread():
sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
yield True
signal.signal(signal.SIGINT, sigint_handler)
else:
yield False
def start_proc_mask_signal(proc):
"""
Start process(es) with SIGINT ignored.
Args:
proc: (mp.Process or list)
Note:
The signal mask is only applied when called from main thread.
"""
if not isinstance(proc, list):
proc = [proc]
with mask_sigint():
for p in proc:
if isinstance(p, mp.Process):
if sys.version_info < (3, 4) or mp.get_start_method() == 'fork':
log_once("""
Starting a process with 'fork' method is efficient but not safe and may cause deadlock or crash.
Use 'forkserver' or 'spawn' method instead if you run into such issues.
See https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods on how to set them.
""".replace("\n", ""),
'warn') # noqa
p.start()
def subproc_call(cmd, timeout=None):
"""
Execute a command with timeout, and return STDOUT and STDERR
Args:
cmd(str): the command to execute.
timeout(float): timeout in seconds.
Returns:
output(bytes), retcode(int). If timeout, retcode is -1.
"""
try:
output = subprocess.check_output(
cmd, stderr=subprocess.STDOUT,
shell=True, timeout=timeout)
return output, 0
except subprocess.TimeoutExpired as e:
logger.warn("Command '{}' timeout!".format(cmd))
if e.output:
logger.warn(e.output.decode('utf-8'))
return e.output, -1
else:
return "", -1
except subprocess.CalledProcessError as e:
logger.warn("Command '{}' failed, return code={}".format(cmd, e.returncode))
logger.warn(e.output.decode('utf-8'))
return e.output, e.returncode
except Exception:
logger.warn("Command '{}' failed to run.".format(cmd))
return "", -2
class OrderedContainer(object):
"""
    Like a queue, but it always waits to receive the item with rank
    (x+1) and produce it before producing (x+2).
Warning:
It is not thread-safe.
"""
def __init__(self, start=0):
"""
Args:
start(int): the starting rank.
"""
self.ranks = []
self.data = []
self.wait_for = start
def put(self, rank, val):
"""
Args:
            rank(int): rank of the element. All elements must have different ranks.
val: an object
"""
idx = bisect.bisect(self.ranks, rank)
self.ranks.insert(idx, rank)
self.data.insert(idx, val)
def has_next(self):
if len(self.ranks) == 0:
return False
return self.ranks[0] == self.wait_for
def get(self):
assert self.has_next()
ret = self.data[0]
rank = self.ranks[0]
del self.ranks[0]
del self.data[0]
self.wait_for += 1
return rank, ret
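
# A small sketch of OrderedContainer: items may be fed out of order, but they
# can only be taken out in rank order, starting from ``start``:
def _demo_ordered_container():
    c = OrderedContainer(start=0)
    c.put(1, 'b')
    assert not c.has_next()      # still waiting for rank 0
    c.put(0, 'a')
    assert c.get() == (0, 'a')
    assert c.get() == (1, 'b')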
class OrderedResultGatherProc(mp.Process):
"""
Gather indexed data from a data queue, and produce results with the
original index-based order.
"""
def __init__(self, data_queue, nr_producer, start=0):
"""
Args:
data_queue(mp.Queue): a queue which contains datapoints.
nr_producer(int): number of producer processes. This process will
terminate after receiving this many of :class:`DIE` sentinel.
start(int): the rank of the first object
"""
super(OrderedResultGatherProc, self).__init__()
self.data_queue = data_queue
self.ordered_container = OrderedContainer(start=start)
self.result_queue = mp.Queue()
self.nr_producer = nr_producer
def run(self):
nr_end = 0
try:
while True:
task_id, data = self.data_queue.get()
if task_id == DIE:
self.result_queue.put((task_id, data))
nr_end += 1
if nr_end == self.nr_producer:
return
else:
self.ordered_container.put(task_id, data)
while self.ordered_container.has_next():
self.result_queue.put(self.ordered_container.get())
except Exception as e:
import traceback
traceback.print_exc()
raise e
def get(self):
return self.result_queue.get()

# SyNet-master/tensorpack/tensorpack/utils/timer.py
# File: timer.py
import atexit
from collections import defaultdict
from contextlib import contextmanager
from time import perf_counter as timer # noqa
from . import logger
from .stats import StatCounter
__all__ = ['timed_operation', 'IterSpeedCounter', 'Timer']
@contextmanager
def timed_operation(msg, log_start=False):
"""
Surround a context with a timer.
Args:
msg(str): the log to print.
log_start(bool): whether to print also at the beginning.
Example:
.. code-block:: python
with timed_operation('Good Stuff'):
time.sleep(1)
Will print:
.. code-block:: python
        Good Stuff finished, time:1.0000 sec.
"""
assert len(msg)
if log_start:
logger.info('Start {} ...'.format(msg))
start = timer()
yield
msg = msg[0].upper() + msg[1:]
logger.info('{} finished, time:{:.4f} sec.'.format(
msg, timer() - start))
_TOTAL_TIMER_DATA = defaultdict(StatCounter)
@contextmanager
def total_timer(msg):
""" A context which add the time spent inside to the global TotalTimer. """
start = timer()
yield
t = timer() - start
_TOTAL_TIMER_DATA[msg].feed(t)
def print_total_timer():
"""
Print the content of the global TotalTimer, if it's not empty. This function will automatically get
called when program exits.
"""
if len(_TOTAL_TIMER_DATA) == 0:
return
for k, v in _TOTAL_TIMER_DATA.items():
logger.info("Total Time: {} -> {:.2f} sec, {} times, {:.3g} sec/time".format(
k, v.sum, v.count, v.average))
atexit.register(print_total_timer)
class IterSpeedCounter(object):
""" Test how often some code gets reached.
Example:
Print the speed of the iteration every 100 times.
.. code-block:: python
speed = IterSpeedCounter(100)
for k in range(1000):
# do something
speed()
"""
def __init__(self, print_every, name=None):
"""
Args:
print_every(int): interval to print.
name(str): name to used when print.
"""
self.cnt = 0
self.print_every = int(print_every)
self.name = name if name else 'IterSpeed'
def reset(self):
self.start = timer()
def __call__(self):
if self.cnt == 0:
self.reset()
self.cnt += 1
if self.cnt % self.print_every != 0:
return
t = timer() - self.start
logger.info("{}: {:.2f} sec, {} times, {:.3g} sec/time".format(
self.name, t, self.cnt, t / self.cnt))
class Timer():
"""
A timer class which computes the time elapsed since the start/reset of the timer.
"""
def __init__(self):
self.reset()
def reset(self):
"""
Reset the timer.
"""
self._start = timer()
self._paused = False
self._total_paused = 0
def pause(self):
"""
Pause the timer.
"""
assert self._paused is False
self._paused = timer()
def is_paused(self):
return self._paused is not False
def resume(self):
"""
Resume the timer.
"""
assert self._paused is not False
self._total_paused += timer() - self._paused
self._paused = False
def seconds(self):
"""
Returns:
float: the total number of seconds since the start/reset of the timer, excluding the
time in between when the timer is paused.
"""
if self._paused:
self.resume()
self.pause()
return timer() - self._start - self._total_paused
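
# A small sketch of the Timer above: time spent while paused is excluded
# from ``seconds()``.
def _demo_timer():
    import time
    t = Timer()
    time.sleep(0.1)
    t.pause()
    time.sleep(0.5)              # this interval is not counted
    t.resume()
    print('elapsed: {:.2f} sec'.format(t.seconds()))  # roughly 0.1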

# SyNet-master/tensorpack/tensorpack/utils/argtools.py
# File: argtools.py
import inspect
import functools
from . import logger
__all__ = ['map_arg', 'memoized', 'memoized_method', 'graph_memoized', 'shape2d', 'shape4d',
'memoized_ignoreargs', 'log_once']
def map_arg(**maps):
"""
Apply a mapping on certain argument before calling the original function.
Args:
maps (dict): {argument_name: map_func}
"""
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# getcallargs was deprecated since 3.5
sig = inspect.signature(func)
argmap = sig.bind_partial(*args, **kwargs).arguments
for k, map_func in maps.items():
if k in argmap:
argmap[k] = map_func(argmap[k])
return func(**argmap)
return wrapper
return deco
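
# A tiny sketch of ``map_arg``: the ``size`` argument below is normalized
# with ``int`` before the function body runs, whether it is passed
# positionally or by keyword, e.g. _demo_area('3') == _demo_area(size=3.0) == 9.0:
@map_arg(size=int)
def _demo_area(size, scale=1.0):
    return size * size * scale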
memoized = functools.lru_cache(maxsize=None)
""" Alias to :func:`functools.lru_cache`
WARNING: memoization will keep keys and values alive!
"""
def graph_memoized(func):
"""
Like memoized, but keep one cache per default graph.
"""
# TODO it keeps the graph alive
from ..compat import tfv1
GRAPH_ARG_NAME = '__IMPOSSIBLE_NAME_FOR_YOU__'
@memoized
def func_with_graph_arg(*args, **kwargs):
kwargs.pop(GRAPH_ARG_NAME)
return func(*args, **kwargs)
@functools.wraps(func)
def wrapper(*args, **kwargs):
assert GRAPH_ARG_NAME not in kwargs, "No Way!!"
graph = tfv1.get_default_graph()
kwargs[GRAPH_ARG_NAME] = graph
return func_with_graph_arg(*args, **kwargs)
return wrapper
_MEMOIZED_NOARGS = {}
def memoized_ignoreargs(func):
"""
A decorator. It performs memoization ignoring the arguments used to call
the function.
"""
def wrapper(*args, **kwargs):
if func not in _MEMOIZED_NOARGS:
res = func(*args, **kwargs)
_MEMOIZED_NOARGS[func] = res
return res
return _MEMOIZED_NOARGS[func]
return wrapper
def shape2d(a):
"""
Ensure a 2D shape.
Args:
        a: an int or a tuple/list of length 2
    Returns:
        list: of length 2. if ``a`` is an int, return ``[a, a]``.
"""
if type(a) == int:
return [a, a]
if isinstance(a, (list, tuple)):
assert len(a) == 2
return list(a)
raise RuntimeError("Illegal shape: {}".format(a))
def get_data_format(data_format, keras_mode=True):
if keras_mode:
dic = {'NCHW': 'channels_first', 'NHWC': 'channels_last'}
else:
dic = {'channels_first': 'NCHW', 'channels_last': 'NHWC'}
ret = dic.get(data_format, data_format)
if ret not in dic.values():
raise ValueError("Unknown data_format: {}".format(data_format))
return ret
def shape4d(a, data_format='NHWC'):
"""
    Ensure a 4D shape, to use with 4D symbolic functions.
    Args:
        a: an int or a tuple/list of length 2
    Returns:
        list: of length 4. if ``a`` is an int, return ``[1, a, a, 1]``
or ``[1, 1, a, a]`` depending on data_format.
"""
s2d = shape2d(a)
if get_data_format(data_format, False) == 'NHWC':
return [1] + s2d + [1]
else:
return [1, 1] + s2d
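
# Quick self-checks of the shape helpers above; shape4d places the spatial
# dims according to data_format:
def _demo_shapes():
    assert shape2d(3) == [3, 3]
    assert shape4d(3) == [1, 3, 3, 1]                         # NHWC (default)
    assert shape4d((2, 3), 'channels_first') == [1, 1, 2, 3]  # NCHW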
@memoized
def log_once(message, func='info'):
"""
    Log a certain message only once. Calling this function more than once
    with the same message will result in a no-op.
Args:
message(str): message to log
func(str): the name of the logger method. e.g. "info", "warn", "error".
"""
getattr(logger, func)(message)
def call_only_once(func):
"""
Decorate a method or property of a class, so that this method can only
be called once for every instance.
Calling it more than once will result in exception.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
# cannot use hasattr here, because hasattr tries to getattr, which
# fails if func is a property
assert func.__name__ in dir(self), "call_only_once can only be used on method or property!"
if not hasattr(self, '_CALL_ONLY_ONCE_CACHE'):
cache = self._CALL_ONLY_ONCE_CACHE = set()
else:
cache = self._CALL_ONLY_ONCE_CACHE
cls = type(self)
# cannot use ismethod(), because decorated method becomes a function
is_method = inspect.isfunction(getattr(cls, func.__name__))
assert func not in cache, \
"{} {}.{} can only be called once per object!".format(
'Method' if is_method else 'Property',
cls.__name__, func.__name__)
cache.add(func)
return func(*args, **kwargs)
return wrapper
def memoized_method(func):
"""
A decorator that performs memoization on methods. It stores the cache on the object instance itself.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
assert func.__name__ in dir(self), "memoized_method can only be used on method!"
if not hasattr(self, '_MEMOIZED_CACHE'):
cache = self._MEMOIZED_CACHE = {}
else:
cache = self._MEMOIZED_CACHE
key = (func, ) + args[1:] + tuple(kwargs)
ret = cache.get(key, None)
if ret is not None:
return ret
value = func(*args, **kwargs)
cache[key] = value
return value
return wrapper
if __name__ == '__main__':
class A():
def __init__(self):
self._p = 0
@call_only_once
def f(self, x):
print(x)
@property
def p(self):
return self._p
@p.setter
@call_only_once
def p(self, val):
self._p = val
a = A()
a.f(1)
b = A()
b.f(2)
b.f(1)
print(b.p)
print(b.p)
b.p = 2
print(b.p)
b.p = 3
print(b.p)

# SyNet-master/tensorpack/tensorpack/utils/palette.py
# File: palette.py
import numpy as np
__all__ = ['PALETTE_RGB']
PALETTE_HEX = [
"#000000", "#FFFF00", "#1CE6FF", "#FF34FF", "#FF4A46", "#008941", "#006FA6", "#A30059",
"#FFDBE5", "#7A4900", "#0000A6", "#63FFAC", "#B79762", "#004D43", "#8FB0FF", "#997D87",
"#5A0007", "#809693", "#FEFFE6", "#1B4400", "#4FC601", "#3B5DFF", "#4A3B53", "#FF2F80",
"#61615A", "#BA0900", "#6B7900", "#00C2A0", "#FFAA92", "#FF90C9", "#B903AA", "#D16100",
"#DDEFFF", "#000035", "#7B4F4B", "#A1C299", "#300018", "#0AA6D8", "#013349", "#00846F",
"#372101", "#FFB500", "#C2FFED", "#A079BF", "#CC0744", "#C0B9B2", "#C2FF99", "#001E09",
"#00489C", "#6F0062", "#0CBD66", "#EEC3FF", "#456D75", "#B77B68", "#7A87A1", "#788D66",
"#885578", "#FAD09F", "#FF8A9A", "#D157A0", "#BEC459", "#456648", "#0086ED", "#886F4C",
"#34362D", "#B4A8BD", "#00A6AA", "#452C2C", "#636375", "#A3C8C9", "#FF913F", "#938A81",
"#575329", "#00FECF", "#B05B6F", "#8CD0FF", "#3B9700", "#04F757", "#C8A1A1", "#1E6E00",
"#7900D7", "#A77500", "#6367A9", "#A05837", "#6B002C", "#772600", "#D790FF", "#9B9700",
"#549E79", "#FFF69F", "#201625", "#72418F", "#BC23FF", "#99ADC0", "#3A2465", "#922329",
"#5B4534", "#FDE8DC", "#404E55", "#0089A3", "#CB7E98", "#A4E804", "#324E72", "#6A3A4C",
"#83AB58", "#001C1E", "#D1F7CE", "#004B28", "#C8D0F6", "#A3A489", "#806C66", "#222800",
"#BF5650", "#E83000", "#66796D", "#DA007C", "#FF1A59", "#8ADBB4", "#1E0200", "#5B4E51",
"#C895C5", "#320033", "#FF6832", "#66E1D3", "#CFCDAC", "#D0AC94",
"#7ED379", "#012C58"]
DETECTRON_PALETTE = np.array(
[
0.000, 0.447, 0.741,
0.850, 0.325, 0.098,
0.929, 0.694, 0.125,
0.494, 0.184, 0.556,
0.466, 0.674, 0.188,
0.301, 0.745, 0.933,
0.635, 0.078, 0.184,
0.300, 0.300, 0.300,
0.600, 0.600, 0.600,
1.000, 0.000, 0.000,
1.000, 0.500, 0.000,
0.749, 0.749, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 1.000,
0.667, 0.000, 1.000,
0.333, 0.333, 0.000,
0.333, 0.667, 0.000,
0.333, 1.000, 0.000,
0.667, 0.333, 0.000,
0.667, 0.667, 0.000,
0.667, 1.000, 0.000,
1.000, 0.333, 0.000,
1.000, 0.667, 0.000,
1.000, 1.000, 0.000,
0.000, 0.333, 0.500,
0.000, 0.667, 0.500,
0.000, 1.000, 0.500,
0.333, 0.000, 0.500,
0.333, 0.333, 0.500,
0.333, 0.667, 0.500,
0.333, 1.000, 0.500,
0.667, 0.000, 0.500,
0.667, 0.333, 0.500,
0.667, 0.667, 0.500,
0.667, 1.000, 0.500,
1.000, 0.000, 0.500,
1.000, 0.333, 0.500,
1.000, 0.667, 0.500,
1.000, 1.000, 0.500,
0.000, 0.333, 1.000,
0.000, 0.667, 1.000,
0.000, 1.000, 1.000,
0.333, 0.000, 1.000,
0.333, 0.333, 1.000,
0.333, 0.667, 1.000,
0.333, 1.000, 1.000,
0.667, 0.000, 1.000,
0.667, 0.333, 1.000,
0.667, 0.667, 1.000,
0.667, 1.000, 1.000,
1.000, 0.000, 1.000,
1.000, 0.333, 1.000,
1.000, 0.667, 1.000,
0.167, 0.000, 0.000,
0.333, 0.000, 0.000,
0.500, 0.000, 0.000,
0.667, 0.000, 0.000,
0.833, 0.000, 0.000,
1.000, 0.000, 0.000,
0.000, 0.167, 0.000,
0.000, 0.333, 0.000,
0.000, 0.500, 0.000,
0.000, 0.667, 0.000,
0.000, 0.833, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 0.167,
0.000, 0.000, 0.333,
0.000, 0.000, 0.500,
0.000, 0.000, 0.667,
0.000, 0.000, 0.833,
0.000, 0.000, 1.000,
0.000, 0.000, 0.000,
0.143, 0.143, 0.143,
0.286, 0.286, 0.286,
0.429, 0.429, 0.429,
0.571, 0.571, 0.571,
0.714, 0.714, 0.714,
0.857, 0.857, 0.857,
1.000, 1.000, 1.000
]
).astype(np.float32).reshape(-1, 3) * 255
def _parse_hex_color(s):
r = int(s[1:3], 16)
g = int(s[3:5], 16)
b = int(s[5:7], 16)
return (r, g, b)
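
# Example: the second entry of PALETTE_HEX decodes to pure yellow:
def _demo_parse_hex_color():
    assert _parse_hex_color('#FFFF00') == (255, 255, 0)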
# PALETTE_RGB = np.asarray(
# list(map(_parse_hex_color, PALETTE_HEX)),
# dtype='int32')
# This seems more beautiful
PALETTE_RGB = DETECTRON_PALETTE

# SyNet-master/tensorpack/tensorpack/utils/fs.py
# File: fs.py
import errno
import os
import tqdm
from six.moves import urllib
from . import logger
from .utils import execute_only_once
__all__ = ['mkdir_p', 'download', 'recursive_walk', 'get_dataset_path', 'normpath']
def mkdir_p(dirname):
""" Like "mkdir -p", make a dir recursively, but do nothing if the dir exists
Args:
dirname(str):
"""
assert dirname is not None
if dirname == '' or os.path.isdir(dirname):
return
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
def download(url, dir, filename=None, expect_size=None):
"""
Download URL to a directory.
Will figure out the filename automatically from URL, if not given.
"""
mkdir_p(dir)
if filename is None:
filename = url.split('/')[-1]
fpath = os.path.join(dir, filename)
if os.path.isfile(fpath):
if expect_size is not None and os.stat(fpath).st_size == expect_size:
logger.info("File {} exists! Skip download.".format(filename))
return fpath
else:
logger.warn("File {} exists. Will overwrite with a new download!".format(filename))
def hook(t):
last_b = [0]
def inner(b, bsize, tsize=None):
if tsize is not None:
t.total = tsize
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return inner
try:
with tqdm.tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
fpath, _ = urllib.request.urlretrieve(url, fpath, reporthook=hook(t))
statinfo = os.stat(fpath)
size = statinfo.st_size
except IOError:
logger.error("Failed to download {}".format(url))
raise
assert size > 0, "Downloaded an empty file from {}!".format(url)
if expect_size is not None and size != expect_size:
logger.error("File downloaded from {} does not match the expected size!".format(url))
logger.error("You may have downloaded a broken file, or the upstream may have modified the file.")
# TODO human-readable size
    logger.info('Successfully downloaded ' + filename + ". " + str(size) + ' bytes.')
return fpath
def recursive_walk(rootdir):
"""
Yields:
str: All files in rootdir, recursively.
"""
for r, dirs, files in os.walk(rootdir):
for f in files:
yield os.path.join(r, f)
def get_dataset_path(*args):
"""
Get the path to some dataset under ``$TENSORPACK_DATASET``.
Args:
args: strings to be joined to form path.
Returns:
str: path to the dataset.
"""
d = os.environ.get('TENSORPACK_DATASET', None)
if d is None:
d = os.path.join(os.path.expanduser('~'), 'tensorpack_data')
if execute_only_once():
logger.warn("Env var $TENSORPACK_DATASET not set, using {} for datasets.".format(d))
if not os.path.isdir(d):
mkdir_p(d)
logger.info("Created the directory {}.".format(d))
assert os.path.isdir(d), d
return os.path.join(d, *args)
def normpath(path):
"""
    Normalizes a path to a folder, taking into consideration remote storage
    (e.g. cloud storage referenced by '://' at the beginning of the path).
    Args:
        path: path to be normalized.
Returns:
str: normalized path.
"""
return path if '://' in path else os.path.normpath(path)
if __name__ == '__main__':
download('http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz', '.')

# SyNet-master/tensorpack/tensorpack/utils/stats.py
# File: stats.py
import numpy as np
__all__ = ['StatCounter', 'BinaryStatistics', 'RatioCounter', 'Accuracy',
'OnlineMoments']
class StatCounter(object):
""" A simple counter"""
def __init__(self):
self.reset()
def feed(self, v):
"""
Args:
v(float or np.ndarray): has to be the same shape between calls.
"""
self._values.append(v)
def reset(self):
self._values = []
@property
def count(self):
return len(self._values)
@property
def average(self):
assert len(self._values)
return np.mean(self._values)
@property
def sum(self):
assert len(self._values)
return np.sum(self._values)
@property
def max(self):
assert len(self._values)
return max(self._values)
@property
def min(self):
assert len(self._values)
return min(self._values)
def samples(self):
"""
Returns all samples.
"""
return self._values
class RatioCounter(object):
""" A counter to count ratio of something. """
def __init__(self):
self.reset()
def reset(self):
self._tot = 0
self._cnt = 0
def feed(self, count, total=1):
"""
Args:
            count(int): the count of some event of interest.
            total(int): the total number of events.
"""
self._tot += total
self._cnt += count
@property
def ratio(self):
if self._tot == 0:
return 0
return self._cnt * 1.0 / self._tot
@property
def total(self):
"""
Returns:
int: the total
"""
return self._tot
@property
def count(self):
"""
Returns:
            int: the count
"""
return self._cnt
class Accuracy(RatioCounter):
""" A RatioCounter with a fancy name """
@property
def accuracy(self):
return self.ratio
class BinaryStatistics(object):
"""
Statistics for binary decision,
including precision, recall, false positive, false negative
"""
def __init__(self):
self.reset()
def reset(self):
self.nr_pos = 0 # positive label
self.nr_neg = 0 # negative label
self.nr_pred_pos = 0
self.nr_pred_neg = 0
self.corr_pos = 0 # correct predict positive
self.corr_neg = 0 # correct predict negative
def feed(self, pred, label):
"""
Args:
pred (np.ndarray): binary array.
label (np.ndarray): binary array of the same size.
"""
assert pred.shape == label.shape, "{} != {}".format(pred.shape, label.shape)
self.nr_pos += (label == 1).sum()
self.nr_neg += (label == 0).sum()
self.nr_pred_pos += (pred == 1).sum()
self.nr_pred_neg += (pred == 0).sum()
self.corr_pos += ((pred == 1) & (pred == label)).sum()
self.corr_neg += ((pred == 0) & (pred == label)).sum()
@property
def precision(self):
if self.nr_pred_pos == 0:
return 0
return self.corr_pos * 1. / self.nr_pred_pos
@property
def recall(self):
if self.nr_pos == 0:
return 0
return self.corr_pos * 1. / self.nr_pos
@property
def false_positive(self):
if self.nr_pred_pos == 0:
return 0
return 1 - self.precision
@property
def false_negative(self):
if self.nr_pos == 0:
return 0
return 1 - self.recall
class OnlineMoments(object):
"""Compute 1st and 2nd moments online (to avoid storing all elements).
See algorithm at: https://www.wikiwand.com/en/Algorithms_for_calculating_variance#/Online_algorithm
"""
def __init__(self):
self._mean = 0
self._M2 = 0
self._n = 0
def feed(self, x):
"""
Args:
            x (float or np.ndarray): must have the same shape between calls.
"""
self._n += 1
delta = x - self._mean
self._mean += delta * (1.0 / self._n)
delta2 = x - self._mean
self._M2 += delta * delta2
@property
def mean(self):
return self._mean
@property
def variance(self):
return self._M2 / (self._n - 1)
@property
def std(self):
return np.sqrt(self.variance)
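
# A quick sketch checking OnlineMoments against numpy on a 1-D stream; note
# that ``variance`` divides by (n - 1), i.e. the sample variance:
def _demo_online_moments():
    data = np.random.rand(100)
    om = OnlineMoments()
    for x in data:
        om.feed(x)
    assert np.allclose(om.mean, data.mean())
    assert np.allclose(om.variance, data.var(ddof=1))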

# SyNet-master/tensorpack/tensorpack/utils/develop.py
# File: develop.py
""" Utilities for developers only.
These are not visible to users (not automatically imported), and should not
appear in docs."""
import functools
import importlib
import os
import types
from collections import defaultdict
from datetime import datetime
import six
from . import logger
__all__ = []
def create_dummy_class(klass, dependency):
"""
When a dependency of a class is not available, create a dummy class which throws ImportError when used.
Args:
klass (str): name of the class.
dependency (str): name of the dependency.
Returns:
class: a class object
"""
assert not building_rtfd()
class _DummyMetaClass(type):
# throw error on class attribute access
def __getattr__(_, __):
raise AttributeError("Cannot import '{}', therefore '{}' is not available".format(dependency, klass))
@six.add_metaclass(_DummyMetaClass)
class _Dummy(object):
# throw error on constructor
def __init__(self, *args, **kwargs):
raise ImportError("Cannot import '{}', therefore '{}' is not available".format(dependency, klass))
return _Dummy
def create_dummy_func(func, dependency):
"""
When a dependency of a function is not available, create a dummy function which throws ImportError when used.
Args:
func (str): name of the function.
dependency (str or list[str]): name(s) of the dependency.
Returns:
function: a function object
"""
assert not building_rtfd()
if isinstance(dependency, (list, tuple)):
dependency = ','.join(dependency)
def _dummy(*args, **kwargs):
raise ImportError("Cannot import '{}', therefore '{}' is not available".format(dependency, func))
return _dummy
def building_rtfd():
"""
Returns:
bool: if the library is being imported to generate docs now.
"""
return os.environ.get('READTHEDOCS') == 'True' \
or os.environ.get('DOC_BUILDING')
_DEPRECATED_LOG_NUM = defaultdict(int)
def log_deprecated(name="", text="", eos="", max_num_warnings=None):
"""
Log deprecation warning.
Args:
name (str): name of the deprecated item.
text (str, optional): information about the deprecation.
eos (str, optional): end of service date such as "YYYY-MM-DD".
max_num_warnings (int, optional): the maximum number of times to print this warning
"""
assert name or text
if eos:
eos = "after " + datetime(*map(int, eos.split("-"))).strftime("%d %b")
if name:
if eos:
warn_msg = "%s will be deprecated %s. %s" % (name, eos, text)
else:
warn_msg = "%s was deprecated. %s" % (name, text)
else:
warn_msg = text
if eos:
warn_msg += " Legacy period ends %s" % eos
if max_num_warnings is not None:
if _DEPRECATED_LOG_NUM[warn_msg] >= max_num_warnings:
return
_DEPRECATED_LOG_NUM[warn_msg] += 1
logger.warn("[Deprecated] " + warn_msg)
def deprecated(text="", eos="", max_num_warnings=None):
"""
Args:
text, eos, max_num_warnings: same as :func:`log_deprecated`.
Returns:
a decorator which deprecates the function.
Example:
.. code-block:: python
@deprecated("Explanation of what to do instead.", "2017-11-4")
def foo(...):
pass
"""
def get_location():
import inspect
frame = inspect.currentframe()
if frame:
callstack = inspect.getouterframes(frame)[-1]
return '%s:%i' % (callstack[1], callstack[2])
else:
stack = inspect.stack(0)
entry = stack[2]
return '%s:%i' % (entry[1], entry[2])
def deprecated_inner(func):
@functools.wraps(func)
def new_func(*args, **kwargs):
name = "{} [{}]".format(func.__name__, get_location())
log_deprecated(name, text, eos, max_num_warnings=max_num_warnings)
return func(*args, **kwargs)
return new_func
return deprecated_inner
def HIDE_DOC(func):
func.__HIDE_SPHINX_DOC__ = True
return func
class LazyLoader(types.ModuleType):
def __init__(self, local_name, parent_module_globals, name):
self._local_name = local_name
self._parent_module_globals = parent_module_globals
super(LazyLoader, self).__init__(name)
def _load(self):
# Import the target module and insert it into the parent's namespace
module = importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
# Update this object's dict so that if someone keeps a reference to the
# LazyLoader, lookups are efficient (__getattr__ is only called on lookups
# that fail).
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
def __dir__(self):
module = self._load()
return dir(module)

# SyNet-master/tensorpack/tensorpack/utils/utils.py
# File: utils.py
import inspect
import numpy as np
import re
import os
import sys
from contextlib import contextmanager
from datetime import datetime, timedelta
from tqdm import tqdm
from . import logger
from .concurrency import subproc_call
__all__ = ['change_env',
'get_rng',
'fix_rng_seed',
'get_tqdm',
'execute_only_once',
'humanize_time_delta'
]
def humanize_time_delta(sec):
"""Humanize timedelta given in seconds
Args:
sec (float): time difference in seconds. Must be positive.
Returns:
str - time difference as a readable string
Example:
.. code-block:: python
print(humanize_time_delta(1)) # 1 second
print(humanize_time_delta(60 + 1)) # 1 minute 1 second
print(humanize_time_delta(87.6)) # 1 minute 27 seconds
print(humanize_time_delta(0.01)) # 0.01 seconds
print(humanize_time_delta(60 * 60 + 1)) # 1 hour 1 second
print(humanize_time_delta(60 * 60 * 24 + 1)) # 1 day 1 second
print(humanize_time_delta(60 * 60 * 24 + 60 * 2 + 60*60*9 + 3)) # 1 day 9 hours 2 minutes 3 seconds
"""
if sec < 0:
logger.warn("humanize_time_delta() obtains negative seconds!")
return "{:.3g} seconds".format(sec)
    if sec == 0:
        return "0 seconds"
return "0 second"
time = datetime(2000, 1, 1) + timedelta(seconds=int(sec))
units = ['day', 'hour', 'minute', 'second']
vals = [int(sec // 86400), time.hour, time.minute, time.second]
if sec < 60:
vals[-1] = sec
def _format(v, u):
return "{:.3g} {}{}".format(v, u, "s" if v > 1 else "")
ans = []
for v, u in zip(vals, units):
if v > 0:
ans.append(_format(v, u))
return " ".join(ans)
@contextmanager
def change_env(name, val):
"""
Args:
name(str): name of the env var
val(str or None): the value, or set to None to clear the env var.
Returns:
a context where the environment variable ``name`` being set to
``val``. It will be set back after the context exits.
"""
oldval = os.environ.get(name, None)
if val is None:
try:
del os.environ[name]
except KeyError:
pass
else:
os.environ[name] = val
yield
if oldval is None:
try:
del os.environ[name]
except KeyError:
pass
else:
os.environ[name] = oldval
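# Minimal self-check for change_env() (an illustrative addition, not part of
# the original tensorpack source). 'TP_DEMO_FLAG' is an arbitrary name,
# assumed to be unset before the call.
def _demo_change_env():
    with change_env('TP_DEMO_FLAG', '1'):
        assert os.environ['TP_DEMO_FLAG'] == '1'
    # the previous value (here: unset) is restored when the context exits
    assert os.environ.get('TP_DEMO_FLAG') is None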
_RNG_SEED = None
def fix_rng_seed(seed):
"""
Call this function at the beginning of program to fix rng seed within tensorpack.
Args:
seed (int):
Note:
See https://github.com/tensorpack/tensorpack/issues/196.
Example:
Fix random seed in both tensorpack and tensorflow.
.. code-block:: python
seed = 42
utils.fix_rng_seed(seed)
            tensorflow.set_random_seed(seed)
# run trainer
"""
global _RNG_SEED
_RNG_SEED = int(seed)
def get_rng(obj=None):
"""
Get a good RNG seeded with time, pid and the object.
Args:
obj: some object to use to generate random seed.
Returns:
np.random.RandomState: the RNG.
"""
seed = (id(obj) + os.getpid() +
int(datetime.now().strftime("%Y%m%d%H%M%S%f"))) % 4294967295
if _RNG_SEED is not None:
seed = _RNG_SEED
return np.random.RandomState(seed)
_EXECUTE_HISTORY = set()
def execute_only_once():
"""
    Each call site of this function is guaranteed to return True the
    first time and False afterwards.
Returns:
bool: whether this is the first time this function gets called from this line of code.
Example:
.. code-block:: python
if execute_only_once():
# do something only once
"""
f = inspect.currentframe().f_back
ident = (f.f_code.co_filename, f.f_lineno)
if ident in _EXECUTE_HISTORY:
return False
_EXECUTE_HISTORY.add(ident)
return True
def _pick_tqdm_interval(file):
    # Heuristics to pick an update interval for the progress bar that's nice-looking for users.
isatty = file.isatty()
# Jupyter notebook should be recognized as tty.
try:
from ipykernel import iostream
if isinstance(file, iostream.OutStream):
isatty = True
except ImportError:
pass
if isatty:
return 0.5
else:
# When run under mpirun/slurm, isatty is always False.
# Here we apply some hacky heuristics for slurm.
if 'SLURM_JOB_ID' in os.environ:
if int(os.environ.get('SLURM_JOB_NUM_NODES', 1)) > 1:
# multi-machine job, probably not interactive
return 60
else:
# possibly interactive, so let's be conservative
return 15
if 'OMPI_COMM_WORLD_SIZE' in os.environ:
return 60
# If not a tty, don't refresh progress bar that often
return 180
def get_tqdm_kwargs(**kwargs):
"""
Return default arguments to be used with tqdm.
Args:
kwargs: extra arguments to be used.
Returns:
        dict: keyword arguments to be used with tqdm.
"""
default = dict(
smoothing=0.5,
dynamic_ncols=True,
ascii=True,
bar_format='{l_bar}{bar}|{n_fmt}/{total_fmt}[{elapsed}<{remaining},{rate_noinv_fmt}]'
)
try:
# Use this env var to override the refresh interval setting
interval = float(os.environ['TENSORPACK_PROGRESS_REFRESH'])
except KeyError:
interval = _pick_tqdm_interval(kwargs.get('file', sys.stderr))
default['mininterval'] = interval
default.update(kwargs)
return default
def get_tqdm(*args, **kwargs):
""" Similar to :func:`tqdm.tqdm()`,
but use tensorpack's default options to have consistent style. """
return tqdm(*args, **get_tqdm_kwargs(**kwargs))
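# Illustrative usage (added for clarity; not part of the original tensorpack
# source): get_tqdm() is a drop-in replacement for tqdm.tqdm().
def _demo_get_tqdm():
    with get_tqdm(total=100) as pbar:
        for _ in range(100):
            pbar.update(1)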
def find_library_full_path(name):
"""
    Similar to ``from ctypes.util import find_library``, but tries
    to return the full path if possible.
"""
from ctypes.util import find_library
if os.name == "posix" and sys.platform == "darwin":
# on Mac, ctypes already returns full path
return find_library(name)
def _use_proc_maps(name):
"""
Find so from /proc/pid/maps
        Only works with libraries that have already been loaded.
But this is the most accurate method -- it finds the exact library that's being used.
"""
procmap = os.path.join('/proc', str(os.getpid()), 'maps')
if not os.path.isfile(procmap):
return None
try:
with open(procmap, 'r') as f:
for line in f:
line = line.strip().split(' ')
sofile = line[-1]
basename = os.path.basename(sofile)
if 'lib' + name + '.so' in basename:
if os.path.isfile(sofile):
return os.path.realpath(sofile)
except IOError:
# can fail in certain environment (e.g. chroot)
# if the pids are incorrectly mapped
pass
def _use_ld(name):
"""
Find so with `ld -lname -Lpath`.
It will search for files in LD_LIBRARY_PATH, but not in ldconfig.
"""
cmd = "ld -t -l{} -o {}".format(name, os.devnull)
ld_lib_path = os.environ.get('LD_LIBRARY_PATH', '')
for d in ld_lib_path.split(':'):
cmd = cmd + " -L " + d
result, ret = subproc_call(cmd + '|| true')
expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name)
res = re.search(expr, result.decode('utf-8'))
if res:
res = res.group(0)
if not os.path.isfile(res):
return None
return os.path.realpath(res)
def _use_ldconfig(name):
"""
Find so in `ldconfig -p`.
It does not handle LD_LIBRARY_PATH.
"""
with change_env('LC_ALL', 'C'), change_env('LANG', 'C'):
ldconfig, ret = subproc_call("ldconfig -p")
ldconfig = ldconfig.decode('utf-8')
if ret != 0:
return None
expr = r'\s+(lib%s\.[^\s]+)\s+\(.*=>\s+(.*)' % (re.escape(name))
res = re.search(expr, ldconfig)
if not res:
return None
else:
ret = res.group(2)
return os.path.realpath(ret)
if sys.platform.startswith('linux'):
return _use_proc_maps(name) or _use_ld(name) or _use_ldconfig(name) or find_library(name)
return find_library(name) # don't know what to do
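# Illustrative call (added for clarity; not part of the original tensorpack
# source). The library name is an example and may not exist on your system:
#
#   path = find_library_full_path('cudart')
#   # e.g. '/usr/local/cuda/lib64/libcudart.so.10.0', or None if not found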
| 9,003 | 28.045161 | 107 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/logger.py | # File: logger.py
"""
The logger module itself has the common logging functions of Python's
:class:`logging.Logger`. For example:
.. code-block:: python
from tensorpack.utils import logger
logger.set_logger_dir('train_log/test')
logger.info("Test")
logger.error("Error happened!")
"""
import logging
import os
import os.path
import shutil
import sys
from datetime import datetime
from six.moves import input
from termcolor import colored
__all__ = ['set_logger_dir', 'auto_set_dir', 'get_logger_dir']
class _MyFormatter(logging.Formatter):
def format(self, record):
date = colored('[%(asctime)s @%(filename)s:%(lineno)d]', 'green')
msg = '%(message)s'
if record.levelno == logging.WARNING:
fmt = date + ' ' + colored('WRN', 'red', attrs=['blink']) + ' ' + msg
elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
fmt = date + ' ' + colored('ERR', 'red', attrs=['blink', 'underline']) + ' ' + msg
elif record.levelno == logging.DEBUG:
fmt = date + ' ' + colored('DBG', 'yellow', attrs=['blink']) + ' ' + msg
else:
fmt = date + ' ' + msg
if hasattr(self, '_style'):
# Python3 compatibility
self._style._fmt = fmt
self._fmt = fmt
return super(_MyFormatter, self).format(record)
def _getlogger():
# this file is synced to "dataflow" package as well
package_name = "dataflow" if __name__.startswith("dataflow") else "tensorpack"
logger = logging.getLogger(package_name)
logger.propagate = False
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))
logger.addHandler(handler)
return logger
_logger = _getlogger()
_LOGGING_METHOD = ['info', 'warning', 'error', 'critical', 'exception', 'debug', 'setLevel', 'addFilter']
# export logger functions
for func in _LOGGING_METHOD:
locals()[func] = getattr(_logger, func)
__all__.append(func)
# 'warn' is deprecated in logging module
warn = _logger.warning
__all__.append('warn')
def _get_time_str():
return datetime.now().strftime('%m%d-%H%M%S')
# globals: logger file and directory:
LOG_DIR = None
_FILE_HANDLER = None
def _set_file(path):
global _FILE_HANDLER
if os.path.isfile(path):
backup_name = path + '.' + _get_time_str()
shutil.move(path, backup_name)
_logger.info("Existing log file '{}' backuped to '{}'".format(path, backup_name)) # noqa: F821
hdl = logging.FileHandler(
filename=path, encoding='utf-8', mode='w')
hdl.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))
_FILE_HANDLER = hdl
_logger.addHandler(hdl)
_logger.info("Argv: " + ' '.join(sys.argv))
def set_logger_dir(dirname, action=None):
"""
Set the directory for global logging.
Args:
dirname(str): log directory
action(str): an action of ["k","d","q"] to be performed
when the directory exists. Will ask user by default.
"d": delete the directory. Note that the deletion may fail when
the directory is used by tensorboard.
"k": keep the directory. This is useful when you resume from a
previous training and want the directory to look as if the
training was not interrupted.
Note that this option does not load old models or any other
old states for you. It simply does nothing.
"""
dirname = os.path.normpath(dirname)
global LOG_DIR, _FILE_HANDLER
if _FILE_HANDLER:
# unload and close the old file handler, so that we may safely delete the logger directory
_logger.removeHandler(_FILE_HANDLER)
del _FILE_HANDLER
def dir_nonempty(dirname):
# If directory exists and nonempty (ignore hidden files), prompt for action
return os.path.isdir(dirname) and len([x for x in os.listdir(dirname) if x[0] != '.'])
if dir_nonempty(dirname):
if not action:
_logger.warning("""\
Log directory {} exists! Use 'd' to delete it. """.format(dirname))
_logger.warning("""\
If you're resuming from a previous run, you can choose to keep it.
Press any other key to exit. """)
while not action:
action = input("Select Action: k (keep) / d (delete) / q (quit):").lower().strip()
act = action
if act == 'b':
backup_name = dirname + _get_time_str()
shutil.move(dirname, backup_name)
info("Directory '{}' backuped to '{}'".format(dirname, backup_name)) # noqa: F821
elif act == 'd':
shutil.rmtree(dirname, ignore_errors=True)
if dir_nonempty(dirname):
shutil.rmtree(dirname, ignore_errors=False)
elif act == 'n':
dirname = dirname + _get_time_str()
info("Use a new log directory {}".format(dirname)) # noqa: F821
elif act == 'k':
pass
else:
raise OSError("Directory {} exits!".format(dirname))
LOG_DIR = dirname
from .fs import mkdir_p
mkdir_p(dirname)
_set_file(os.path.join(dirname, 'log.log'))
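# Illustrative non-interactive calls (added for clarity; not part of the
# original tensorpack source). Passing `action` skips the prompt, e.g. in
# scripts or CI; the directory name is hypothetical:
#
#   set_logger_dir('train_log/run1', action='k')  # keep an existing directory
#   set_logger_dir('train_log/run1', action='d')  # or delete it and start fresh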
def auto_set_dir(action=None, name=None):
"""
Use :func:`logger.set_logger_dir` to set log directory to
"./train_log/{scriptname}:{name}". "scriptname" is the name of the main python file currently running"""
mod = sys.modules['__main__']
basename = os.path.basename(mod.__file__)
auto_dirname = os.path.join('train_log', basename[:basename.rfind('.')])
if name:
auto_dirname += '_%s' % name if os.name == 'nt' else ':%s' % name
set_logger_dir(auto_dirname, action=action)
def get_logger_dir():
"""
Returns:
The logger directory, or None if not set.
The directory is used for general logging, tensorboard events, checkpoints, etc.
"""
return LOG_DIR
| 5,996 | 33.268571 | 108 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/loadcaffe.py | # File: loadcaffe.py
import numpy as np
import os
import sys
from . import logger
from .concurrency import subproc_call
from .fs import download, get_dataset_path
from .utils import change_env
__all__ = ['load_caffe', 'get_caffe_pb']
CAFFE_PROTO_URL = "https://github.com/BVLC/caffe/raw/master/src/caffe/proto/caffe.proto"
class CaffeLayerProcessor(object):
def __init__(self, net):
self.net = net
self.layer_names = net._layer_names
self.param_dict = {}
self.processors = {
'Convolution': self.proc_conv,
'InnerProduct': self.proc_fc,
'BatchNorm': self.proc_bn,
'Scale': self.proc_scale
}
def process(self):
for idx, layer in enumerate(self.net.layers):
param = layer.blobs
name = self.layer_names[idx]
if layer.type in self.processors:
logger.info("Processing layer {} of type {}".format(
name, layer.type))
dic = self.processors[layer.type](idx, name, param)
self.param_dict.update(dic)
elif len(layer.blobs) != 0:
logger.warn(
"{} layer contains parameters but is not supported!".format(layer.type))
return self.param_dict
def proc_conv(self, idx, name, param):
assert len(param) <= 2
assert param[0].data.ndim == 4
# caffe: ch_out, ch_in, h, w
W = param[0].data.transpose(2, 3, 1, 0)
if len(param) == 1:
return {name + '/W': W}
else:
return {name + '/W': W,
name + '/b': param[1].data}
def proc_fc(self, idx, name, param):
        # TODO caffe has a 'transpose' option for fc/W
assert len(param) == 2
prev_layer_name = self.net.bottom_names[name][0]
prev_layer_output = self.net.blobs[prev_layer_name].data
if prev_layer_output.ndim == 4:
logger.info("FC layer {} takes spatial data.".format(name))
W = param[0].data
# original: outx(CxHxW)
W = W.reshape((-1,) + prev_layer_output.shape[1:]).transpose(2, 3, 1, 0)
# become: (HxWxC)xout
else:
W = param[0].data.transpose()
return {name + '/W': W,
name + '/b': param[1].data}
def proc_bn(self, idx, name, param):
scale_factor = param[2].data[0]
return {name + '/mean/EMA': param[0].data / scale_factor,
name + '/variance/EMA': param[1].data / scale_factor}
def proc_scale(self, idx, name, param):
bottom_name = self.net.bottom_names[name][0]
# find the bn layer before this scaling
for i, layer in enumerate(self.net.layers):
if layer.type == 'BatchNorm':
name2 = self.layer_names[i]
bottom_name2 = self.net.bottom_names[name2][0]
if bottom_name2 == bottom_name:
# scaling and BN share the same bottom, should merge
logger.info("Merge {} and {} into one BatchNorm layer".format(
name, name2))
return {name2 + '/beta': param[1].data,
name2 + '/gamma': param[0].data}
# assume this scaling layer is part of some BN
logger.error("Could not find a BN layer corresponding to this Scale layer!")
raise ValueError()
def load_caffe(model_desc, model_file):
"""
Load a caffe model. You must be able to ``import caffe`` to use this
function.
Args:
model_desc (str): path to caffe model description file (.prototxt).
model_file (str): path to caffe model parameter file (.caffemodel).
Returns:
dict: the parameters.
"""
with change_env('GLOG_minloglevel', '2'):
import caffe
caffe.set_mode_cpu()
net = caffe.Net(model_desc, model_file, caffe.TEST)
param_dict = CaffeLayerProcessor(net).process()
logger.info("Model loaded from caffe. Params: " +
", ".join(sorted(param_dict.keys())))
return param_dict
def get_caffe_pb():
"""
Get caffe protobuf.
Returns:
The imported caffe protobuf module.
"""
dir = get_dataset_path('caffe')
caffe_pb_file = os.path.join(dir, 'caffe_pb2.py')
if not os.path.isfile(caffe_pb_file):
download(CAFFE_PROTO_URL, dir)
assert os.path.isfile(os.path.join(dir, 'caffe.proto'))
cmd = "protoc --version"
version, ret = subproc_call(cmd, timeout=3)
        if ret != 0:
            logger.error("protoc is not available. Please install protobuf to use get_caffe_pb().")
            sys.exit(1)
try:
version = version.decode('utf-8')
version = float('.'.join(version.split(' ')[1].split('.')[:2]))
assert version >= 2.7, "Require protoc>=2.7 for Python3"
except Exception:
logger.exception("protoc --version gives: " + str(version))
raise
cmd = 'cd {} && protoc caffe.proto --python_out .'.format(dir)
ret = os.system(cmd)
assert ret == 0, \
"Command `{}` failed!".format(cmd)
assert os.path.isfile(caffe_pb_file), caffe_pb_file
import imp
return imp.load_source('caffepb', caffe_pb_file)
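# Illustrative usage (added for clarity; not part of the original tensorpack
# source). The file name below is hypothetical:
#
#   caffepb = get_caffe_pb()
#   net_param = caffepb.NetParameter()
#   with open('model.caffemodel', 'rb') as f:
#       net_param.ParseFromString(f.read())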
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('model', help='.prototxt file')
parser.add_argument('weights', help='.caffemodel file')
parser.add_argument('output', help='output npz file')
args = parser.parse_args()
ret = load_caffe(args.model, args.weights)
if args.output.endswith('.npz'):
np.savez_compressed(args.output, **ret)
elif args.output.endswith('.npy'):
logger.warn("Please use npz format instead!")
np.save(args.output, ret)
else:
raise ValueError("Unknown format {}".format(args.output))
| 5,887 | 34.257485 | 92 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/compatible_serialize.py | from .serialize import loads, dumps # noqa
# keep this file for BC
| 69 | 16.5 | 43 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/viz.py | # File: viz.py
# Credit: zxytim
import numpy as np
import os
import sys
from ..utils.develop import create_dummy_func # noqa
from .argtools import shape2d
from .fs import mkdir_p
try:
import cv2
except ImportError:
pass
__all__ = ['interactive_imshow',
'stack_patches', 'gen_stack_patches',
'dump_dataflow_images', 'intensity_to_rgb',
'draw_boxes']
def interactive_imshow(img, lclick_cb=None, rclick_cb=None, **kwargs):
"""
Args:
img (np.ndarray): an image (expect BGR) to show.
lclick_cb, rclick_cb: a callback ``func(img, x, y)`` for left/right click event.
kwargs: can be {key_cb_a: callback_img, key_cb_b: callback_img}, to
specify a callback ``func(img)`` for keypress.
        Some builtin keypress event handlers:
        * q: destroy the current window
        * x: execute ``sys.exit()``
        * s: save image to "out.png"
        * +/=: zoom in the image
        * -: zoom out the image
"""
name = 'tensorpack_viz_window'
cv2.imshow(name, img)
def mouse_cb(event, x, y, *args):
if event == cv2.EVENT_LBUTTONUP and lclick_cb is not None:
lclick_cb(img, x, y)
elif event == cv2.EVENT_RBUTTONUP and rclick_cb is not None:
rclick_cb(img, x, y)
cv2.setMouseCallback(name, mouse_cb)
key = cv2.waitKey(-1)
while key >= 128:
key = cv2.waitKey(-1)
key = chr(key & 0xff)
cb_name = 'key_cb_' + key
if cb_name in kwargs:
kwargs[cb_name](img)
elif key == 'q':
cv2.destroyWindow(name)
elif key == 'x':
sys.exit()
elif key == 's':
cv2.imwrite('out.png', img)
elif key in ['+', '=']:
img = cv2.resize(img, None, fx=1.3, fy=1.3, interpolation=cv2.INTER_CUBIC)
interactive_imshow(img, lclick_cb, rclick_cb, **kwargs)
elif key == '-':
img = cv2.resize(img, None, fx=0.7, fy=0.7, interpolation=cv2.INTER_CUBIC)
interactive_imshow(img, lclick_cb, rclick_cb, **kwargs)
def _preprocess_patch_list(plist):
plist = np.asarray(plist)
assert plist.dtype != np.object
if plist.ndim == 3:
plist = plist[:, :, :, np.newaxis]
assert plist.ndim == 4 and plist.shape[3] in [1, 3], plist.shape
return plist
def _pad_patch_list(plist, bgcolor):
if isinstance(bgcolor, int):
bgcolor = (bgcolor, bgcolor, bgcolor)
def _pad_channel(plist):
ret = []
for p in plist:
if len(p.shape) == 2:
p = p[:, :, np.newaxis]
if p.shape[2] == 1:
p = np.repeat(p, 3, 2)
ret.append(p)
return ret
plist = _pad_channel(plist)
shapes = [x.shape for x in plist]
ph = max(s[0] for s in shapes)
pw = max(s[1] for s in shapes)
ret = np.zeros((len(plist), ph, pw, 3), dtype=plist[0].dtype)
ret[:, :, :] = bgcolor
for idx, p in enumerate(plist):
s = p.shape
sh = (ph - s[0]) // 2
sw = (pw - s[1]) // 2
ret[idx, sh:sh + s[0], sw:sw + s[1], :] = p
return ret
class Canvas(object):
def __init__(self, ph, pw,
nr_row, nr_col,
channel, border, bgcolor):
self.ph = ph
self.pw = pw
self.nr_row = nr_row
self.nr_col = nr_col
if border is None:
border = int(0.05 * min(ph, pw))
self.border = border
if isinstance(bgcolor, int):
bgchannel = 1
else:
bgchannel = 3
self.bgcolor = bgcolor
self.channel = max(channel, bgchannel)
self.canvas = np.zeros((nr_row * (ph + border) - border,
nr_col * (pw + border) - border,
self.channel), dtype='uint8')
def draw_patches(self, plist):
assert self.nr_row * self.nr_col >= len(plist), \
"{}*{} < {}".format(self.nr_row, self.nr_col, len(plist))
if self.channel == 3 and plist.shape[3] == 1:
plist = np.repeat(plist, 3, axis=3)
cur_row, cur_col = 0, 0
if self.channel == 1:
self.canvas.fill(self.bgcolor)
else:
self.canvas[:, :, :] = self.bgcolor
for patch in plist:
r0 = cur_row * (self.ph + self.border)
c0 = cur_col * (self.pw + self.border)
self.canvas[r0:r0 + self.ph, c0:c0 + self.pw] = patch
cur_col += 1
if cur_col == self.nr_col:
cur_col = 0
cur_row += 1
def get_patchid_from_coord(self, x, y):
x = x // (self.pw + self.border)
        y = y // (self.ph + self.border)  # rows are spaced by patch height, not width
idx = y * self.nr_col + x
return idx
def stack_patches(
patch_list, nr_row, nr_col, border=None,
pad=False, bgcolor=255, viz=False, lclick_cb=None):
"""
    Stack patches into a grid, to produce visualizations like the following:
.. image:: https://github.com/tensorpack/tensorpack/raw/master/examples/GAN/demo/BEGAN-CelebA-samples.jpg
Args:
patch_list(list[ndarray] or ndarray): NHW or NHWC images in [0,255].
nr_row(int), nr_col(int): rows and cols of the grid.
``nr_col * nr_row`` must be no less than ``len(patch_list)``.
border(int): border length between images.
Defaults to ``0.05 * min(patch_width, patch_height)``.
pad (boolean): when `patch_list` is a list, pad all patches to the maximum height and width.
This option allows stacking patches of different shapes together.
bgcolor(int or 3-tuple): background color in [0, 255]. Either an int
or a BGR tuple.
viz(bool): whether to use :func:`interactive_imshow` to visualize the results.
lclick_cb: A callback function ``f(patch, patch index in patch_list)``
to get called when a patch get clicked in imshow.
Returns:
np.ndarray: the stacked image.
"""
if pad:
patch_list = _pad_patch_list(patch_list, bgcolor)
patch_list = _preprocess_patch_list(patch_list)
if lclick_cb is not None:
viz = True
ph, pw = patch_list.shape[1:3]
canvas = Canvas(ph, pw, nr_row, nr_col,
patch_list.shape[-1], border, bgcolor)
if lclick_cb is not None:
def lclick_callback(img, x, y):
idx = canvas.get_patchid_from_coord(x, y)
lclick_cb(patch_list[idx], idx)
else:
lclick_callback = None
canvas.draw_patches(patch_list)
if viz:
interactive_imshow(canvas.canvas, lclick_cb=lclick_callback)
return canvas.canvas
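# Illustrative usage (added for clarity; not part of the original tensorpack
# source): stack 12 random patches into a 3x4 grid and save it. The output
# path is hypothetical:
#
#   patches = np.random.randint(256, size=(12, 64, 64, 3), dtype='uint8')
#   grid = stack_patches(patches, nr_row=3, nr_col=4)
#   cv2.imwrite('grid.jpg', grid)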
def gen_stack_patches(patch_list,
nr_row=None, nr_col=None, border=None,
max_width=1000, max_height=1000,
bgcolor=255, viz=False, lclick_cb=None):
"""
Similar to :func:`stack_patches` but with a generator interface.
    It takes a much longer list and yields stacked results one by one.
For example, if ``patch_list`` contains 1000 images and ``nr_row==nr_col==10``,
this generator yields 10 stacked images.
Args:
nr_row(int), nr_col(int): rows and cols of each result.
max_width(int), max_height(int): Maximum allowed size of the
stacked image. If ``nr_row/nr_col`` are None, this number
will be used to infer the rows and cols. Otherwise the option is
ignored.
patch_list, border, viz, lclick_cb: same as in :func:`stack_patches`.
Yields:
np.ndarray: the stacked image.
"""
# setup parameters
patch_list = _preprocess_patch_list(patch_list)
if lclick_cb is not None:
viz = True
ph, pw = patch_list.shape[1:3]
if border is None:
border = int(0.05 * min(ph, pw))
if nr_row is None:
nr_row = int(max_height / (ph + border))
if nr_col is None:
nr_col = int(max_width / (pw + border))
canvas = Canvas(ph, pw, nr_row, nr_col, patch_list.shape[-1], border, bgcolor)
nr_patch = nr_row * nr_col
start = 0
if lclick_cb is not None:
def lclick_callback(img, x, y):
idx = canvas.get_patchid_from_coord(x, y)
idx = idx + start
if idx < end:
lclick_cb(patch_list[idx], idx)
else:
lclick_callback = None
while True:
end = start + nr_patch
cur_list = patch_list[start:end]
if not len(cur_list):
return
canvas.draw_patches(cur_list)
if viz:
interactive_imshow(canvas.canvas, lclick_cb=lclick_callback)
yield canvas.canvas
start = end
def dump_dataflow_images(df, index=0, batched=True,
number=1000, output_dir=None,
scale=1, resize=None, viz=None,
flipRGB=False):
"""
Dump or visualize images of a :class:`DataFlow`.
Args:
df (DataFlow): the DataFlow.
index (int): the index of the image component.
batched (bool): whether the component contains batched images (NHW or
NHWC) or not (HW or HWC).
        number (int): how many datapoints to take from the DataFlow.
output_dir (str): output directory to save images, default to not save.
scale (float): scale the value, usually either 1 or 255.
resize (tuple or None): tuple of (h, w) to resize the images to.
viz (tuple or None): tuple of (h, w) determining the grid size to use
with :func:`gen_stack_patches` for visualization. No visualization will happen by
default.
        flipRGB (bool): apply an RGB<->BGR conversion or not.
"""
if output_dir:
mkdir_p(output_dir)
if viz is not None:
viz = shape2d(viz)
vizsize = viz[0] * viz[1]
if resize is not None:
resize = tuple(shape2d(resize))
vizlist = []
df.reset_state()
cnt = 0
while True:
for dp in df:
if not batched:
imgbatch = [dp[index]]
else:
imgbatch = dp[index]
for img in imgbatch:
cnt += 1
if cnt == number:
return
if scale != 1:
img = img * scale
if resize is not None:
img = cv2.resize(img, resize)
if flipRGB:
img = img[:, :, ::-1]
if output_dir:
fname = os.path.join(output_dir, '{:03d}.jpg'.format(cnt))
cv2.imwrite(fname, img)
if viz is not None:
vizlist.append(img)
if viz is not None and len(vizlist) >= vizsize:
stack_patches(
vizlist[:vizsize],
nr_row=viz[0], nr_col=viz[1], viz=True)
vizlist = vizlist[vizsize:]
def intensity_to_rgb(intensity, cmap='cubehelix', normalize=False):
"""
Convert a 1-channel matrix of intensities to an RGB image employing a colormap.
This function requires matplotlib. See `matplotlib colormaps
<http://matplotlib.org/examples/color/colormaps_reference.html>`_ for a
    list of available colormaps.
Args:
intensity (np.ndarray): array of intensities such as saliency.
cmap (str): name of the colormap to use.
normalize (bool): if True, will normalize the intensity so that it has
minimum 0 and maximum 1.
Returns:
np.ndarray: an RGB float32 image in range [0, 255], a colored heatmap.
"""
assert intensity.ndim == 2, intensity.shape
intensity = intensity.astype("float")
if normalize:
intensity -= intensity.min()
intensity /= intensity.max()
cmap = plt.get_cmap(cmap)
intensity = cmap(intensity)[..., :3]
return intensity.astype('float32') * 255.0
def draw_text(img, pos, text, color, font_scale=0.4):
"""
Draw text on an image.
Args:
pos (tuple): x, y; the position of the text
text (str):
font_scale (float):
color (tuple): a 3-tuple BGR color in [0, 255]
"""
img = img.astype(np.uint8)
x0, y0 = int(pos[0]), int(pos[1])
# Compute text size.
font = cv2.FONT_HERSHEY_SIMPLEX
((text_w, text_h), _) = cv2.getTextSize(text, font, font_scale, 1)
# Place text background.
if x0 + text_w > img.shape[1]:
x0 = img.shape[1] - text_w
if y0 - int(1.15 * text_h) < 0:
y0 = int(1.15 * text_h)
back_topleft = x0, y0 - int(1.3 * text_h)
back_bottomright = x0 + text_w, y0
cv2.rectangle(img, back_topleft, back_bottomright, color, -1)
# Show text.
text_bottomleft = x0, y0 - int(0.25 * text_h)
cv2.putText(img, text, text_bottomleft, font, font_scale, (222, 222, 222), lineType=cv2.LINE_AA)
return img
def draw_boxes(im, boxes, labels=None, color=None):
"""
Args:
im (np.ndarray): a BGR image in range [0,255]. It will not be modified.
boxes (np.ndarray): a numpy array of shape Nx4 where each row is [x1, y1, x2, y2].
labels: (list[str] or None)
color: a 3-tuple BGR color (in range [0, 255])
Returns:
np.ndarray: a new image.
"""
boxes = np.asarray(boxes, dtype='int32')
if labels is not None:
assert len(labels) == len(boxes), "{} != {}".format(len(labels), len(boxes))
areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
sorted_inds = np.argsort(-areas) # draw large ones first
assert areas.min() > 0, areas.min()
# allow equal, because we are not very strict about rounding error here
assert boxes[:, 0].min() >= 0 and boxes[:, 1].min() >= 0 \
and boxes[:, 2].max() <= im.shape[1] and boxes[:, 3].max() <= im.shape[0], \
"Image shape: {}\n Boxes:\n{}".format(str(im.shape), str(boxes))
im = im.copy()
if color is None:
color = (15, 128, 15)
if im.ndim == 2 or (im.ndim == 3 and im.shape[2] == 1):
im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
for i in sorted_inds:
box = boxes[i, :]
if labels is not None:
im = draw_text(im, (box[0], box[1]), labels[i], color=color)
cv2.rectangle(im, (box[0], box[1]), (box[2], box[3]),
color=color, thickness=1)
return im
try:
import matplotlib.pyplot as plt
except (ImportError, RuntimeError):
intensity_to_rgb = create_dummy_func('intensity_to_rgb', 'matplotlib') # noqa
if __name__ == '__main__':
if False:
imglist = []
for i in range(100):
fname = "{:03d}.png".format(i)
imglist.append(cv2.imread(fname))
for idx, patch in enumerate(gen_stack_patches(
imglist, max_width=500, max_height=200)):
of = "patch{:02d}.png".format(idx)
cv2.imwrite(of, patch)
if False:
imglist = []
img = cv2.imread('out.png')
img2 = cv2.resize(img, (300, 300))
viz = stack_patches([img, img2], 1, 2, pad=True, viz=True)
if False:
img = cv2.imread('cat.jpg')
boxes = np.asarray([
[10, 30, 200, 100],
[20, 80, 250, 250]
])
img = draw_boxes(img, boxes, ['asdfasdf', '11111111111111'])
interactive_imshow(img)
| 15,256 | 33.131991 | 109 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/nvml.py | # File: nvml.py
import threading
from ctypes import (
CDLL, POINTER, Structure, byref, c_uint,
c_ulonglong, create_string_buffer)
__all__ = ['NVMLContext']
NVML_ERROR_FUNCTION_NOT_FOUND = 13
NvmlErrorCodes = {"0": "NVML_SUCCESS",
"1": "NVML_ERROR_UNINITIALIZED",
"2": "NVML_ERROR_INVALID_ARGUMENT",
"3": "NVML_ERROR_NOT_SUPPORTED",
"4": "NVML_ERROR_NO_PERMISSION",
"5": "NVML_ERROR_ALREADY_INITIALIZED",
"6": "NVML_ERROR_NOT_FOUND",
"7": "NVML_ERROR_INSUFFICIENT_SIZE",
"8": "NVML_ERROR_INSUFFICIENT_POWER",
"9": "NVML_ERROR_DRIVER_NOT_LOADED",
"10": "NVML_ERROR_TIMEOUT",
"11": "NVML_ERROR_IRQ_ISSUE",
"12": "NVML_ERROR_LIBRARY_NOT_FOUND",
"13": "NVML_ERROR_FUNCTION_NOT_FOUND",
"14": "NVML_ERROR_CORRUPTED_INFOROM",
"15": "NVML_ERROR_GPU_IS_LOST",
"16": "NVML_ERROR_RESET_REQUIRED",
"17": "NVML_ERROR_OPERATING_SYSTEM",
"18": "NVML_ERROR_LIB_RM_VERSION_MISMATCH",
"999": "NVML_ERROR_UNKNOWN"}
class NvmlException(Exception):
def __init__(self, error_code):
super(NvmlException, self).__init__(error_code)
self.error_code = error_code
def __str__(self):
return NvmlErrorCodes[str(self.error_code)]
def _check_return(ret):
if (ret != 0):
raise NvmlException(ret)
return ret
class NVML(object):
"""
Loader for libnvidia-ml.so
"""
_nvmlLib = None
_lib_lock = threading.Lock()
def load(self):
with self._lib_lock:
if self._nvmlLib is None:
self._nvmlLib = CDLL("libnvidia-ml.so.1")
function_pointers = ["nvmlDeviceGetName", "nvmlDeviceGetUUID", "nvmlDeviceGetMemoryInfo",
"nvmlDeviceGetUtilizationRates", "nvmlInit_v2", "nvmlShutdown",
"nvmlDeviceGetCount_v2", "nvmlDeviceGetHandleByIndex_v2"]
self.func_ptr = {n: self._function_pointer(n) for n in function_pointers}
def _function_pointer(self, name):
try:
return getattr(self._nvmlLib, name)
except AttributeError:
raise NvmlException(NVML_ERROR_FUNCTION_NOT_FOUND)
def get_function(self, name):
if name in self.func_ptr.keys():
return self.func_ptr[name]
_NVML = NVML()
class NvidiaDevice(object):
"""Represent a single GPUDevice"""
def __init__(self, hnd):
super(NvidiaDevice, self).__init__()
self.hnd = hnd
def memory(self):
"""Memory information in bytes
Example:
>>> print(ctx.device(0).memory())
{'total': 4238016512L, 'used': 434831360L, 'free': 3803185152L}
Returns:
total/used/free memory in bytes
"""
class GpuMemoryInfo(Structure):
_fields_ = [
('total', c_ulonglong),
('free', c_ulonglong),
('used', c_ulonglong),
]
c_memory = GpuMemoryInfo()
_check_return(_NVML.get_function(
"nvmlDeviceGetMemoryInfo")(self.hnd, byref(c_memory)))
return {'total': c_memory.total, 'free': c_memory.free, 'used': c_memory.used}
def utilization(self):
"""Percent of time over the past second was utilized.
Details:
Percent of time over the past second during which one or more kernels was executing on the GPU.
Percent of time over the past second during which global (device) memory was being read or written
Example:
>>> print(ctx.device(0).utilization())
{'gpu': 4L, 'memory': 6L}
"""
class GpuUtilizationInfo(Structure):
_fields_ = [
('gpu', c_uint),
('memory', c_uint),
]
c_util = GpuUtilizationInfo()
_check_return(_NVML.get_function(
"nvmlDeviceGetUtilizationRates")(self.hnd, byref(c_util)))
return {'gpu': c_util.gpu, 'memory': c_util.memory}
def name(self):
buflen = 1024
buf = create_string_buffer(buflen)
fn = _NVML.get_function("nvmlDeviceGetName")
        ret = fn(self.hnd, buf, c_uint(buflen))
_check_return(ret)
return buf.value.decode('utf-8')
class NVMLContext(object):
"""Creates a context to query information
Example:
with NVMLContext() as ctx:
num_gpus = ctx.num_devices()
for device in ctx.devices():
print(device.memory())
print(device.utilization())
"""
def __enter__(self):
"""Create a new context """
_NVML.load()
_check_return(_NVML.get_function("nvmlInit_v2")())
return self
def __exit__(self, type, value, tb):
"""Destroy current context"""
_check_return(_NVML.get_function("nvmlShutdown")())
def num_devices(self):
"""Get number of devices """
c_count = c_uint()
_check_return(_NVML.get_function(
"nvmlDeviceGetCount_v2")(byref(c_count)))
return c_count.value
def devices(self):
"""
Returns:
[NvidiaDevice]: a list of devices
"""
return [self.device(i) for i in range(self.num_devices())]
def device(self, idx):
"""Get a specific GPU device
Args:
idx: index of device
Returns:
NvidiaDevice: single GPU device
"""
class GpuDevice(Structure):
pass
c_nvmlDevice_t = POINTER(GpuDevice)
c_index = c_uint(idx)
device = c_nvmlDevice_t()
_check_return(_NVML.get_function(
"nvmlDeviceGetHandleByIndex_v2")(c_index, byref(device)))
return NvidiaDevice(device)
if __name__ == '__main__':
with NVMLContext() as ctx:
for idx, dev in enumerate(ctx.devices()):
print(idx, dev.name())
with NVMLContext() as ctx:
print(ctx.devices())
print(ctx.devices()[0].utilization())
| 6,281 | 28.218605 | 109 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/debug.py | # File: debug.py
import sys
def enable_call_trace():
""" Enable trace for calls to any function. """
def tracer(frame, event, arg):
if event == 'call':
co = frame.f_code
func_name = co.co_name
if func_name == 'write' or func_name == 'print':
# ignore write() calls from print statements
return
func_line_no = frame.f_lineno
func_filename = co.co_filename
caller = frame.f_back
if caller:
caller_line_no = caller.f_lineno
caller_filename = caller.f_code.co_filename
                print('Call to `%s` at %s:%s from %s:%s' %
(func_name, func_filename, func_line_no,
caller_filename, caller_line_no))
return
sys.settrace(tracer)
if __name__ == '__main__':
enable_call_trace()
def b(a):
print(2)
def a():
print(1)
b(1)
a()
| 1,023 | 23.97561 | 63 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/serialize.py | # File: serialize.py
import os
import pickle
from multiprocessing.reduction import ForkingPickler
import msgpack
import msgpack_numpy
msgpack_numpy.patch()
assert msgpack.version >= (0, 5, 2)
__all__ = ['loads', 'dumps']
MAX_MSGPACK_LEN = 1000000000
class MsgpackSerializer(object):
@staticmethod
def dumps(obj):
"""
Serialize an object.
Returns:
Implementation-dependent bytes-like object.
"""
return msgpack.dumps(obj, use_bin_type=True)
@staticmethod
def loads(buf):
"""
Args:
buf: the output of `dumps`.
"""
# Since 0.6, the default max size was set to 1MB.
# We change it to approximately 1G.
return msgpack.loads(buf, raw=False,
max_bin_len=MAX_MSGPACK_LEN,
max_array_len=MAX_MSGPACK_LEN,
max_map_len=MAX_MSGPACK_LEN,
max_str_len=MAX_MSGPACK_LEN)
class PyarrowSerializer(object):
@staticmethod
def dumps(obj):
"""
Serialize an object.
Returns:
Implementation-dependent bytes-like object.
May not be compatible across different versions of pyarrow.
"""
import pyarrow as pa
return pa.serialize(obj).to_buffer()
@staticmethod
def dumps_bytes(obj):
"""
Returns:
bytes
"""
return PyarrowSerializer.dumps(obj).to_pybytes()
@staticmethod
def loads(buf):
"""
Args:
buf: the output of `dumps` or `dumps_bytes`.
"""
import pyarrow as pa
return pa.deserialize(buf)
class PickleSerializer(object):
@staticmethod
def dumps(obj):
"""
Returns:
bytes
"""
return pickle.dumps(obj, protocol=-1)
@staticmethod
def loads(buf):
"""
Args:
            buf (bytes): the output of `dumps`.
"""
return pickle.loads(buf)
# Define the default serializer to be used that dumps data to bytes
_DEFAULT_S = os.environ.get('TENSORPACK_SERIALIZE', 'pickle')
if _DEFAULT_S == "pyarrow":
dumps = PyarrowSerializer.dumps_bytes
loads = PyarrowSerializer.loads
elif _DEFAULT_S == "pickle":
dumps = PickleSerializer.dumps
loads = PickleSerializer.loads
else:
dumps = MsgpackSerializer.dumps
loads = MsgpackSerializer.loads
# Define the default serializer to be used for passing data
# among a pair of peers. In this case the deserialization is
# known to happen only once
_DEFAULT_S = os.environ.get('TENSORPACK_ONCE_SERIALIZE', 'pickle')
if _DEFAULT_S == "pyarrow":
dumps_once = PyarrowSerializer.dumps
loads_once = PyarrowSerializer.loads
elif _DEFAULT_S == "pickle":
dumps_once = ForkingPickler.dumps
loads_once = ForkingPickler.loads
else:
dumps_once = MsgpackSerializer.dumps
loads_once = MsgpackSerializer.loads
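# Minimal round-trip self-check (an illustrative addition, not part of the
# original tensorpack source). It holds for whichever backend the
# TENSORPACK_SERIALIZE variable selected above:
def _demo_roundtrip():
    obj = {'step': 3, 'name': 'demo'}
    assert loads(dumps(obj)) == obj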
| 2,979 | 23.227642 | 71 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/naming.py | # File: naming.py
GLOBAL_STEP_INCR_OP_NAME = 'global_step_incr'
# extra variables to summarize during training in a moving-average way
MOVING_SUMMARY_OPS_KEY = 'MOVING_SUMMARY_OPS'
| 208 | 22.222222 | 70 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/gpu.py | # File: gpu.py
import os
from . import logger
from .concurrency import subproc_call
from .nvml import NVMLContext
from .utils import change_env
__all__ = ['change_gpu', 'get_nr_gpu', 'get_num_gpu']
def change_gpu(val):
"""
Args:
val: an integer, the index of the GPU or -1 to disable GPU.
Returns:
a context where ``CUDA_VISIBLE_DEVICES=val``.
"""
val = str(val)
if val == '-1':
val = ''
return change_env('CUDA_VISIBLE_DEVICES', val)
def get_num_gpu():
"""
Returns:
int: #available GPUs in CUDA_VISIBLE_DEVICES, or in the system.
"""
def warn_return(ret, message):
try:
import tensorflow as tf
except ImportError:
return ret
built_with_cuda = tf.test.is_built_with_cuda()
if not built_with_cuda and ret > 0:
logger.warn(message + "But TensorFlow was not built with CUDA support and could not use GPUs!")
return ret
env = os.environ.get('CUDA_VISIBLE_DEVICES', None)
if env:
return warn_return(len(env.split(',')), "Found non-empty CUDA_VISIBLE_DEVICES. ")
output, code = subproc_call("nvidia-smi -L", timeout=5)
if code == 0:
output = output.decode('utf-8')
return warn_return(len(output.strip().split('\n')), "Found nvidia-smi. ")
try:
# Use NVML to query device properties
with NVMLContext() as ctx:
return warn_return(ctx.num_devices(), "NVML found nvidia devices. ")
except Exception:
# Fallback
logger.info("Loading local devices by TensorFlow ...")
try:
import tensorflow as tf
# available since TF 1.14
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
except AttributeError:
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
# Note this will initialize all GPUs and therefore has side effect
gpu_devices = [x.name for x in local_device_protos if x.device_type == 'GPU']
return len(gpu_devices)
get_nr_gpu = get_num_gpu
| 2,254 | 29.066667 | 107 | py |
SyNet | SyNet-master/tensorpack/tensorpack/contrib/keras.py | # File: keras.py
from contextlib import contextmanager
import six
import tensorflow as tf
from tensorflow import keras
from ..callbacks import Callback, CallbackToHook, InferenceRunner, InferenceRunnerBase, ScalarStats
from ..models.regularize import regularize_cost_from_collection
from ..tfutils.collection import backup_collection, restore_collection
from ..tfutils.common import get_op_tensor_name
from ..tfutils.scope_utils import cached_name_scope
from ..tfutils.summary import add_moving_summary
from ..tfutils.tower import get_current_tower_context
from ..train import SimpleTrainer, SyncMultiGPUTrainerParameterServer, Trainer
from ..train.interface import apply_default_prefetch
from ..train.trainers import DistributedTrainerBase
from ..utils import logger
from ..utils.gpu import get_nr_gpu
__all__ = ['KerasPhaseCallback', 'setup_keras_trainer', 'KerasModel']
TOTAL_LOSS_NAME = 'total_loss'
def _check_name(tensor, name):
tensorname = get_op_tensor_name(tensor.name)[0]
assert tensorname.split('/')[-1] == name, \
"{} does not match {}, you may have name conflict somewhere!".format(tensor.name, name)
class KerasModelCaller(object):
"""
    A Keras model doesn't support variable scope reuse.
    This is a wrapper around a Keras model to mimic reuse.
"""
def __init__(self, get_model):
self.get_model = get_model
self.cached_model = None
def __call__(self, *input_tensors):
"""
Args:
input_tensors ([tf.Tensor])
Returns:
output tensors of this tower, evaluated with the input tensors.
"""
reuse = tf.get_variable_scope().reuse
old_trainable_names = {x.name for x in tf.trainable_variables()}
trainable_backup = backup_collection([tf.GraphKeys.TRAINABLE_VARIABLES])
update_ops_backup = backup_collection([tf.GraphKeys.UPDATE_OPS])
def post_process_model(model):
added_trainable_names = {x.name for x in tf.trainable_variables()}
restore_collection(trainable_backup)
for v in model.weights:
# In Keras, the collection is not respected and could contain non-trainable vars.
# We put M.weights into the collection instead.
if v.name not in old_trainable_names and v.name in added_trainable_names:
tf.add_to_collection(tf.GraphKeys.TRAINABLE_VARIABLES, v)
new_trainable_names = {x.name for x in tf.trainable_variables()}
for n in added_trainable_names:
if n not in new_trainable_names:
logger.warn("Keras created trainable variable '{}' which is actually not trainable. "
"This was automatically corrected.".format(n))
# Keras models might not use this collection at all (in some versions).
restore_collection(update_ops_backup)
for op in model.updates:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, op)
if self.cached_model is None:
assert not reuse
# starting from some versions, tf.keras starts to prepend name scope to variable names ..
@contextmanager
def clear_tower0_name_scope():
ns = tf.get_default_graph().get_name_scope()
if ns == 'tower0':
with tf.name_scope('/'):
yield
else:
yield
with clear_tower0_name_scope():
model = self.cached_model = self.get_model(*input_tensors)
assert isinstance(model, keras.Model), \
"Your get_model function should return a `tf.keras.Model`!"
outputs = model.outputs
elif reuse:
# use the cached Keras model to mimic reuse
# NOTE: ctx.is_training won't be useful inside model,
# because inference will always use the cached Keras model
model = self.cached_model
outputs = model.call(*input_tensors)
else:
# create new Keras model if not reuse
model = self.get_model(*input_tensors)
outputs = model.outputs
post_process_model(model)
if isinstance(outputs, list) and len(outputs) == 1:
return outputs[0]
return outputs
class KerasPhaseCallback(Callback):
"""
    Keras needs an extra input if learning_phase is used by the model.
This callback will be used:
1. By the trainer with isTrain=True
2. By InferenceRunner with isTrain=False, in the form of hooks
If you use :class:`KerasModel` or :func:`setup_keras_trainer`,
this callback will be automatically added when needed.
"""
def __init__(self, isTrain):
assert isinstance(isTrain, bool), isTrain
self._isTrain = isTrain
self._learning_phase = keras.backend.learning_phase()
def _setup_graph(self):
logger.info("Using Keras learning phase {} in the graph!".format(
self._learning_phase.name))
cbs = self.trainer._callbacks.cbs
for cb in cbs:
# XXX HACK
if isinstance(cb, InferenceRunnerBase):
h = CallbackToHook(KerasPhaseCallback(False))
cb.register_hook(h)
def _before_run(self, ctx):
return tf.train.SessionRunArgs(
fetches=[], feed_dict={self._learning_phase: int(self._isTrain)})
def setup_keras_trainer(
trainer, get_model,
input_signature, target_signature,
input, optimizer, loss, metrics):
"""
Args:
trainer (SingleCostTrainer):
get_model (input1, input2, ... -> tf.keras.Model):
A function which takes tensors, builds and returns a Keras model.
It will be part of the tower function.
        input_signature, target_signature ([tf.TensorSpec]): signatures for the input and target tensors.
        input (InputSource):
optimizer (tf.train.Optimizer):
loss, metrics: list of strings
"""
assert isinstance(optimizer, tf.train.Optimizer), optimizer
assert isinstance(loss, list), loss
assert len(loss) >= 1, "No loss was given!"
assert isinstance(metrics, list), metrics
model_caller = KerasModelCaller(get_model)
nr_inputs = len(input_signature)
def get_cost(*inputs):
ctx = get_current_tower_context()
input_tensors = list(inputs[:nr_inputs])
target_tensors = list(inputs[nr_inputs:])
# TODO mapping between target tensors & output tensors
outputs = model_caller(*input_tensors)
if isinstance(outputs, tf.Tensor):
outputs = [outputs]
assert len(outputs) == len(target_tensors), \
"len({}) != len({})".format(str(outputs), str(target_tensors))
assert len(outputs) == len(loss), \
"len({}) != len({})".format(str(outputs), str(loss))
loss_tensors = []
for idx, loss_name in enumerate(loss):
with cached_name_scope('keras_loss', top_level=False):
loss_fn = keras.losses.get(loss_name)
curr_loss = loss_fn(target_tensors[idx], outputs[idx])
curr_loss = tf.reduce_mean(curr_loss, name=loss_name)
_check_name(curr_loss, loss_name)
loss_tensors.append(curr_loss)
loss_reg = regularize_cost_from_collection()
if loss_reg is not None:
total_loss = tf.add_n(loss_tensors + [loss_reg], name=TOTAL_LOSS_NAME)
add_moving_summary(loss_reg, total_loss, *loss_tensors)
else:
total_loss = tf.add_n(loss_tensors, name=TOTAL_LOSS_NAME)
add_moving_summary(total_loss, *loss_tensors)
if metrics and (ctx.is_main_training_tower or not ctx.is_training):
# for list: one metric for each output
metric_tensors = []
for oid, metric_name in enumerate(metrics):
output_tensor = outputs[oid]
target_tensor = target_tensors[oid] # TODO may not have the same mapping?
with cached_name_scope('keras_metric', top_level=False):
metric_fn = keras.metrics.get(metric_name)
metric_tensor = metric_fn(target_tensor, output_tensor)
metric_tensor = tf.reduce_mean(metric_tensor, name=metric_name)
_check_name(metric_tensor, metric_name)
# check name conflict here
metric_tensors.append(metric_tensor)
add_moving_summary(*metric_tensors)
return total_loss
trainer.setup_graph(
input_signature + target_signature,
input,
get_cost,
lambda: optimizer)
if isinstance(keras.backend.learning_phase(), tf.Tensor) and len(keras.backend.learning_phase().consumers()) > 0:
# check if learning_phase is used in this model
trainer.register_callback(KerasPhaseCallback(True))
class KerasModel(object):
def __init__(self, get_model, input_signature=None, target_signature=None,
input=None, trainer=None):
"""
Args:
get_model (input1, input2, ... -> keras.Model):
A function which takes tensors, builds and returns a Keras model.
It will be part of the tower function.
input_signature ([tf.TensorSpec]): required. The signature for inputs.
target_signature ([tf.TensorSpec]): required. The signature for the targets tensors.
input (InputSource | DataFlow): the InputSource or DataFlow where the input data comes from.
trainer (Trainer): the default will check the number of available GPUs and use them all.
"""
self.get_model = get_model
assert callable(get_model), get_model
self.input_signature = input_signature
self.target_signature = target_signature
if trainer is None:
nr_gpu = get_nr_gpu()
if nr_gpu <= 1:
trainer = SimpleTrainer()
else:
# the default multi-gpu trainer
trainer = SyncMultiGPUTrainerParameterServer(nr_gpu)
assert isinstance(trainer, Trainer), trainer
assert not isinstance(trainer, DistributedTrainerBase)
assert input is not None, "Argument 'input' is required!"
self.input = apply_default_prefetch(input, trainer)
self.trainer = trainer
def compile(self, optimizer, loss, metrics=None):
"""
Args:
optimizer (tf.train.Optimizer):
loss, metrics: string or list of strings
"""
if isinstance(loss, six.string_types):
loss = [loss]
if metrics is None:
metrics = []
if isinstance(metrics, six.string_types):
metrics = [metrics]
self._stats_to_inference = loss + metrics + [TOTAL_LOSS_NAME]
setup_keras_trainer(
self.trainer, get_model=self.get_model,
input_signature=self.input_signature,
target_signature=self.target_signature,
input=self.input,
optimizer=optimizer,
loss=loss,
metrics=metrics)
def fit(self, validation_data=None, **kwargs):
"""
Args:
validation_data (DataFlow or InputSource): to be used for inference.
The inference callback is added as the first in the callback list.
If you need to use it in a different order, please write it in the callback list manually.
kwargs: same arguments as :meth:`Trainer.train_with_defaults`.
"""
callbacks = kwargs.pop('callbacks', [])
if validation_data is not None:
# There is no way to guess where users want this callback. So we have to choose one.
# MinSaver may need results from this callback,
# so we put this callback at first.
callbacks.insert(0, InferenceRunner(
validation_data, ScalarStats(self._stats_to_inference)))
self.trainer.train_with_defaults(callbacks=callbacks, **kwargs)
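# Illustrative end-to-end sketch (added for clarity; not part of the original
# tensorpack source). `my_get_model` and `my_dataflow` are hypothetical, and
# QueueInput comes from tensorpack.input_source:
#
#   M = KerasModel(
#       my_get_model,  # (input_tensor, ...) -> tf.keras.Model
#       input_signature=[tf.TensorSpec([None, 28, 28, 1], tf.float32, 'images')],
#       target_signature=[tf.TensorSpec([None, 10], tf.float32, 'labels')],
#       input=QueueInput(my_dataflow))
#   M.compile(optimizer=tf.train.AdamOptimizer(1e-3),
#             loss='categorical_crossentropy', metrics='accuracy')
#   M.fit(steps_per_epoch=100, max_epoch=5)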
| 12,196 | 40.345763 | 117 | py |
SyNet | SyNet-master/tensorpack/tensorpack/predict/base.py | # File: base.py
from abc import ABCMeta, abstractmethod
import six
import tensorflow as tf
from ..input_source import PlaceholderInput
from ..tfutils.common import get_tensors_by_names, get_op_tensor_name
from ..tfutils.tower import PredictTowerContext
__all__ = ['PredictorBase',
'OnlinePredictor', 'OfflinePredictor']
@six.add_metaclass(ABCMeta)
class PredictorBase(object):
"""
Base class for all predictors.
Attributes:
return_input (bool): whether the call will also return (inputs, outputs)
or just outputs
"""
def __call__(self, *dp):
"""
Call the predictor on some inputs.
Example:
When you have a predictor defined with two inputs, call it with:
.. code-block:: python
predictor(e1, e2)
Returns:
list[array]: list of outputs
"""
output = self._do_call(dp)
if self.return_input:
return (dp, output)
else:
return output
@abstractmethod
def _do_call(self, dp):
"""
Args:
dp: input datapoint. must have the same length as input_names
Returns:
output as defined by the config
"""
class AsyncPredictorBase(PredictorBase):
""" Base class for all async predictors. """
@abstractmethod
def put_task(self, dp, callback=None):
"""
Args:
            dp (list): A datapoint as inputs. It could be either batched or not
                batched, depending on the predictor implementation.
callback: a thread-safe callback to get called with
either outputs or (inputs, outputs), if `return_input` is True.
Returns:
concurrent.futures.Future: a Future of results
"""
@abstractmethod
def start(self):
""" Start workers """
def _do_call(self, dp):
fut = self.put_task(dp)
# in Tornado, Future.result() doesn't wait
return fut.result()
class OnlinePredictor(PredictorBase):
"""
A predictor which directly use an existing session and given tensors.
Attributes:
sess: The tf.Session object associated with this predictor.
"""
ACCEPT_OPTIONS = False
""" See Session.make_callable """
def __init__(self, input_tensors, output_tensors,
return_input=False, sess=None):
"""
Args:
input_tensors (list): list of names.
output_tensors (list): list of names.
return_input (bool): same as :attr:`PredictorBase.return_input`.
sess (tf.Session): the session this predictor runs in. If None,
will use the default session at the first call.
Note that in TensorFlow, default session is thread-local.
"""
def normalize_name(t):
if isinstance(t, six.string_types):
return get_op_tensor_name(t)[1]
return t
self.return_input = return_input
self.input_tensors = [normalize_name(x) for x in input_tensors]
self.output_tensors = [normalize_name(x) for x in output_tensors]
self.sess = sess
if sess is not None:
self._callable = sess.make_callable(
fetches=output_tensors,
feed_list=input_tensors,
accept_options=self.ACCEPT_OPTIONS)
else:
self._callable = None
def _do_call(self, dp):
assert len(dp) == len(self.input_tensors), \
"{} != {}".format(len(dp), len(self.input_tensors))
if self.sess is None:
self.sess = tf.get_default_session()
assert self.sess is not None, "Predictor isn't called under a default session!"
if self._callable is None:
self._callable = self.sess.make_callable(
fetches=self.output_tensors,
feed_list=self.input_tensors,
accept_options=self.ACCEPT_OPTIONS)
# run_metadata = tf.RunMetadata()
# options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
return self._callable(*dp)
class OfflinePredictor(OnlinePredictor):
""" A predictor built from a given config.
A single-tower model will be built without any prefix.
Example:
.. code-block:: python
config = PredictConfig(model=my_model,
inputs_names=['image'],
output_names=['linear/output', 'prediction'])
predictor = OfflinePredictor(config)
batch_image = np.random.rand(1, 100, 100, 3)
batch_output, batch_prediction = predictor(batch_image)
"""
def __init__(self, config):
"""
Args:
config (PredictConfig): the config to use.
"""
self.graph = config._maybe_create_graph()
with self.graph.as_default():
input = PlaceholderInput()
input.setup(config.input_signature)
with PredictTowerContext(''):
config.tower_func(*input.get_input_tensors())
input_tensors = get_tensors_by_names(config.input_names)
output_tensors = get_tensors_by_names(config.output_names)
config.session_init._setup_graph()
sess = config.session_creator.create_session()
config.session_init._run_init(sess)
super(OfflinePredictor, self).__init__(
input_tensors, output_tensors, config.return_input, sess)
| 5,588 | 30.937143 | 91 | py |
SyNet | SyNet-master/tensorpack/tensorpack/predict/concurrency.py | # File: concurrency.py
import multiprocessing
import numpy as np
from concurrent.futures import Future
import tensorflow as tf
from six.moves import queue, range
from ..compat import tfv1
from ..tfutils.model_utils import describe_trainable_vars
from ..utils import logger
from ..utils.concurrency import DIE, ShareSessionThread, StoppableThread
from .base import AsyncPredictorBase, OfflinePredictor, OnlinePredictor
__all__ = ['MultiThreadAsyncPredictor']
class MultiProcessPredictWorker(multiprocessing.Process):
""" Base class for predict worker that runs offline in multiprocess"""
def __init__(self, idx, config):
"""
Args:
idx (int): index of the worker. the 0th worker will print log.
config (PredictConfig): the config to use.
"""
super(MultiProcessPredictWorker, self).__init__()
self.name = "MultiProcessPredictWorker-{}".format(idx)
self.idx = idx
self.config = config
def _init_runtime(self):
""" Call _init_runtime under different CUDA_VISIBLE_DEVICES, you'll
have workers that run on multiGPUs
"""
if self.idx != 0:
from tensorpack.models.registry import disable_layer_logging
disable_layer_logging()
self.predictor = OfflinePredictor(self.config)
if self.idx == 0:
with self.predictor.graph.as_default():
describe_trainable_vars()
class MultiProcessQueuePredictWorker(MultiProcessPredictWorker):
"""
An offline predictor worker that takes input and produces output by queue.
Each process will exit when they see :class:`DIE`.
"""
def __init__(self, idx, inqueue, outqueue, config):
"""
Args:
idx, config: same as in :class:`MultiProcessPredictWorker`.
inqueue (multiprocessing.Queue): input queue to get data point. elements are (task_id, dp)
outqueue (multiprocessing.Queue): output queue to put result. elements are (task_id, output)
"""
super(MultiProcessQueuePredictWorker, self).__init__(idx, config)
self.inqueue = inqueue
self.outqueue = outqueue
assert isinstance(self.inqueue, multiprocessing.queues.Queue)
assert isinstance(self.outqueue, multiprocessing.queues.Queue)
def run(self):
self._init_runtime()
while True:
tid, dp = self.inqueue.get()
if tid == DIE:
self.outqueue.put((DIE, None))
return
else:
self.outqueue.put((tid, self.predictor(*dp)))
class PredictorWorkerThread(StoppableThread, ShareSessionThread):
def __init__(self, queue, pred_func, id, batch_size=5):
super(PredictorWorkerThread, self).__init__()
self.name = "PredictorWorkerThread-{}".format(id)
self.queue = queue
self.func = pred_func
self.daemon = True
self.batch_size = batch_size
self.id = id
def run(self):
with self.default_sess():
while not self.stopped():
batched, futures = self.fetch_batch()
try:
outputs = self.func(*batched)
except tf.errors.CancelledError:
for f in futures:
f.cancel()
logger.warn("In PredictorWorkerThread id={}, call was cancelled.".format(self.id))
return
# print "Worker {} batched {} Queue {}".format(
# self.id, len(futures), self.queue.qsize())
# debug, for speed testing
# if not hasattr(self, 'xxx'):
# self.xxx = outputs = self.func(batched)
# else:
# outputs = [[self.xxx[0][0]] * len(batched[0]), [self.xxx[1][0]] * len(batched[0])]
for idx, f in enumerate(futures):
f.set_result([k[idx] for k in outputs])
def fetch_batch(self):
""" Fetch a batch of data without waiting"""
inp, f = self.queue.get()
nr_input_var = len(inp)
batched, futures = [[] for _ in range(nr_input_var)], []
for k in range(nr_input_var):
batched[k].append(inp[k])
futures.append(f)
while len(futures) < self.batch_size:
try:
inp, f = self.queue.get_nowait()
for k in range(nr_input_var):
batched[k].append(inp[k])
futures.append(f)
except queue.Empty:
break # do not wait
for k in range(nr_input_var):
batched[k] = np.asarray(batched[k])
return batched, futures
class MultiThreadAsyncPredictor(AsyncPredictorBase):
"""
    A multithreaded online async predictor which runs a list of OnlinePredictor.
    It does extra batching internally.
"""
def __init__(self, predictors, batch_size=5):
"""
Args:
predictors (list): a list of OnlinePredictor available to use.
batch_size (int): the maximum of an internal batch.
"""
assert len(predictors)
self._need_default_sess = False
for k in predictors:
assert isinstance(k, OnlinePredictor), type(k)
if k.sess is None:
self._need_default_sess = True
# TODO support predictors.return_input here
assert not k.return_input
self.input_queue = queue.Queue(maxsize=len(predictors) * 100)
self.threads = [
PredictorWorkerThread(
self.input_queue, f, id, batch_size=batch_size)
for id, f in enumerate(predictors)]
def start(self):
if self._need_default_sess:
assert tfv1.get_default_session() is not None, \
"Not session is bind to predictors, " \
"MultiThreadAsyncPredictor.start() has to be called under a default session!"
for t in self.threads:
t.start()
def put_task(self, dp, callback=None):
"""
Args:
            dp (list): A datapoint as inputs. It could be either batched or not
                batched, depending on the predictor implementation.
callback: a thread-safe callback. When the results are ready, it will be called
with the "future" object.
Returns:
concurrent.futures.Future: a Future of results.
"""
f = Future()
if callback is not None:
f.add_done_callback(callback)
self.input_queue.put((dp, f))
return f
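# Usage sketch (hedged; `config` is an assumed PredictConfig, `dp` a datapoint):
#
#   pred = MultiThreadAsyncPredictor(
#       [OfflinePredictor(config) for _ in range(4)], batch_size=16)
#   pred.start()
#   future = pred.put_task(dp)        # returns immediately
#   outputs = future.result()         # blocks until this datapoint was run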
| 6,665 | 36.033333 | 104 | py |
SyNet | SyNet-master/tensorpack/tensorpack/predict/dataset.py | # File: dataset.py
import multiprocessing
import os
from abc import ABCMeta, abstractmethod
import six
from ..dataflow import DataFlow
from ..dataflow.remote import dump_dataflow_to_process_queue
from ..utils import logger
from ..utils.develop import HIDE_DOC
from ..utils.concurrency import DIE, OrderedResultGatherProc, ensure_proc_terminate
from ..utils.gpu import change_gpu, get_num_gpu
from ..utils.utils import get_tqdm
from .base import OfflinePredictor
from .concurrency import MultiProcessQueuePredictWorker
from .config import PredictConfig
__all__ = ['DatasetPredictorBase', 'SimpleDatasetPredictor',
'MultiProcessDatasetPredictor']
@six.add_metaclass(ABCMeta)
class DatasetPredictorBase(object):
""" Base class for dataset predictors.
These are predictors which run over a :class:`DataFlow`.
"""
def __init__(self, config, dataset):
"""
Args:
config (PredictConfig): the config of predictor.
dataset (DataFlow): the DataFlow to run on.
"""
assert isinstance(dataset, DataFlow)
assert isinstance(config, PredictConfig)
self.config = config
self.dataset = dataset
@abstractmethod
def get_result(self):
"""
Yields:
output for each datapoint in the DataFlow.
"""
pass
def get_all_result(self):
"""
Returns:
list: all outputs for all datapoints in the DataFlow.
"""
return list(self.get_result())
class SimpleDatasetPredictor(DatasetPredictorBase):
"""
Simply create one predictor and run it on the DataFlow.
"""
def __init__(self, config, dataset):
super(SimpleDatasetPredictor, self).__init__(config, dataset)
self.predictor = OfflinePredictor(config)
@HIDE_DOC
def get_result(self):
self.dataset.reset_state()
try:
sz = len(self.dataset)
except NotImplementedError:
sz = 0
with get_tqdm(total=sz, disable=(sz == 0)) as pbar:
for dp in self.dataset:
res = self.predictor(*dp)
yield res
pbar.update()
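# Usage sketch (assumes an existing PredictConfig `config` and DataFlow `df`):
#
#   pred = SimpleDatasetPredictor(config, df)
#   for outputs in pred.get_result():
#       ...                           # one list of outputs per datapoint
#   # or collect everything at once:
#   results = pred.get_all_result()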
class MultiProcessDatasetPredictor(DatasetPredictorBase):
"""
Run prediction in multiple processes, on either CPU or GPU.
    Each process fetches datapoints as tasks and runs predictions independently.
"""
# TODO allow unordered
def __init__(self, config, dataset, nr_proc, use_gpu=True, ordered=True):
"""
Args:
config: same as in :class:`DatasetPredictorBase`.
dataset: same as in :class:`DatasetPredictorBase`.
nr_proc (int): number of processes to use
use_gpu (bool): use GPU or CPU.
If GPU, then ``nr_proc`` cannot be more than what's in
CUDA_VISIBLE_DEVICES.
ordered (bool): produce outputs in the original order of the
datapoints. This will be a bit slower. Otherwise, :meth:`get_result` will produce
outputs in any order.
"""
if config.return_input:
logger.warn("Using the option `return_input` in MultiProcessDatasetPredictor might be slow")
assert nr_proc >= 1, nr_proc
super(MultiProcessDatasetPredictor, self).__init__(config, dataset)
self.nr_proc = nr_proc
self.ordered = ordered
self.inqueue, self.inqueue_proc = dump_dataflow_to_process_queue(
self.dataset, nr_proc * 2, self.nr_proc) # put (idx, dp) to inqueue
if use_gpu:
try:
gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
except KeyError:
gpus = list(range(get_num_gpu()))
assert len(gpus) >= self.nr_proc, \
"nr_proc={} while only {} gpus available".format(
self.nr_proc, len(gpus))
else:
gpus = ['-1'] * self.nr_proc
# worker produces (idx, result) to outqueue
self.outqueue = multiprocessing.Queue()
self.workers = [MultiProcessQueuePredictWorker(
i, self.inqueue, self.outqueue, self.config)
for i in range(self.nr_proc)]
# start inqueue and workers
self.inqueue_proc.start()
for p, gpuid in zip(self.workers, gpus):
if gpuid == '-1':
logger.info("Worker {} uses CPU".format(p.idx))
else:
logger.info("Worker {} uses GPU {}".format(p.idx, gpuid))
with change_gpu(gpuid):
p.start()
if ordered:
self.result_queue = OrderedResultGatherProc(
self.outqueue, nr_producer=self.nr_proc)
self.result_queue.start()
ensure_proc_terminate(self.result_queue)
else:
self.result_queue = self.outqueue
ensure_proc_terminate(self.workers + [self.inqueue_proc])
@HIDE_DOC
def get_result(self):
try:
sz = len(self.dataset)
except NotImplementedError:
sz = 0
with get_tqdm(total=sz, disable=(sz == 0)) as pbar:
die_cnt = 0
while True:
res = self.result_queue.get()
pbar.update()
if res[0] != DIE:
yield res[1]
else:
die_cnt += 1
if die_cnt == self.nr_proc:
break
self.inqueue_proc.join()
self.inqueue_proc.terminate()
        if self.ordered:  # if ordered, then result_queue is a Process
self.result_queue.join()
self.result_queue.terminate()
for p in self.workers:
p.join()
p.terminate()
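# Usage sketch (assumes `config` and `df` as above; 4 workers, one GPU each):
#
#   pred = MultiProcessDatasetPredictor(config, df, nr_proc=4,
#                                       use_gpu=True, ordered=True)
#   for outputs in pred.get_result():
#       ...                           # outputs arrive in the original order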
| 5,787 | 32.847953 | 104 | py |
SyNet | SyNet-master/tensorpack/tensorpack/predict/feedfree.py |
from tensorflow.python.training.monitored_session import _HookedSession as HookedSession
from ..callbacks import Callbacks
from ..tfutils.tower import PredictTowerContext
from .base import PredictorBase
__all__ = ['FeedfreePredictor']
class FeedfreePredictor(PredictorBase):
"""
Create a predictor that takes inputs from an :class:`InputSource`, instead of from feeds.
    An instance `pred` of :class:`FeedfreePredictor` can only be called as `pred()`, which returns
a list of output values as defined in config.output_names.
"""
def __init__(self, config, input_source):
"""
Args:
config (PredictConfig): the config to use.
input_source (InputSource): the feedfree InputSource to use.
Must match the signature of the tower function in config.
"""
self._config = config
self._input_source = input_source
assert config.return_input is False, \
"return_input is not supported in FeedfreePredictor! " \
"If you need to fetch inputs, add the names to the output_names!"
self._hooks = []
self.graph = config._maybe_create_graph()
with self.graph.as_default():
self._input_callbacks = Callbacks(
self._input_source.setup(config.input_signature))
with PredictTowerContext(''):
self._input_tensors = self._input_source.get_input_tensors()
config.tower_func(*self._input_tensors)
self._tower_handle = config.tower_func.towers[-1]
self._output_tensors = self._tower_handle.get_tensors(config.output_names)
self._input_callbacks.setup_graph(None)
for h in self._input_callbacks.get_hooks():
self._register_hook(h)
self._initialize_session()
def _register_hook(self, hook):
"""
Args:
hook (tf.train.SessionRunHook):
"""
self._hooks.append(hook)
def _initialize_session(self):
# init the session
self._config.session_init._setup_graph()
self._sess = self._config.session_creator.create_session()
self._config.session_init._run_init(self._sess)
with self._sess.as_default():
self._input_callbacks.before_train()
self._hooked_sess = HookedSession(self._sess, self._hooks)
def __call__(self):
return self._hooked_sess.run(self._output_tensors)
def _do_call(self):
raise NotImplementedError("You're calling the wrong function!")
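# Usage sketch (assumes a PredictConfig `config` whose tower function matches a
# DataFlow `df`; QueueInput is an assumed import, not part of this module):
#
#   pred = FeedfreePredictor(config, QueueInput(df))
#   outputs = pred()    # no arguments: inputs come from the InputSource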
| 2,598 | 35.097222 | 98 | py |
SyNet | SyNet-master/tensorpack/tensorpack/predict/multigpu.py | # File: multigpu.py
import tensorflow as tf
from ..input_source import PlaceholderInput
from ..tfutils.tower import PredictTowerContext
from ..utils import logger
from .base import OnlinePredictor
__all__ = ['MultiTowerOfflinePredictor',
'DataParallelOfflinePredictor']
class MultiTowerOfflinePredictor(OnlinePredictor):
""" A multi-tower multi-GPU predictor.
It builds one predictor for each tower.
"""
def __init__(self, config, towers):
"""
Args:
config (PredictConfig): the config to use.
towers: a list of relative GPU id.
"""
assert len(towers) > 0
self.graph = config._maybe_create_graph()
self.predictors = []
self.return_input = config.return_input
with self.graph.as_default():
handles = []
input = PlaceholderInput()
input.setup(config.input_signature)
for idx, t in enumerate(towers):
tower_name = 'tower' + str(t)
device = '/gpu:{}'.format(t)
with tf.variable_scope(tf.get_variable_scope(), reuse=idx > 0), \
tf.device(device), \
PredictTowerContext(tower_name):
logger.info("Building graph for predict tower '{}' on device {} ...".format(tower_name, device))
config.tower_func(*input.get_input_tensors())
handles.append(config.tower_func.towers[-1])
config.session_init._setup_graph()
self.sess = config.session_creator.create_session()
config.session_init._run_init(self.sess)
for h in handles:
input_tensors = h.get_tensors(config.input_names)
output_tensors = h.get_tensors(config.output_names)
self.predictors.append(OnlinePredictor(
input_tensors, output_tensors, config.return_input, self.sess))
def _do_call(self, dp):
# use the first tower for compatible PredictorBase interface
return self.predictors[0]._do_call(dp)
def get_predictor(self, n):
"""
Returns:
OnlinePredictor: the nth predictor on the nth tower.
"""
l = len(self.predictors)
if n >= l:
logger.warn("n > #towers, will assign predictor to GPU by round-robin")
        return self.predictors[n % l]
def get_predictors(self):
"""
Returns:
            list[OnlinePredictor]: a list of predictors
"""
return self.predictors
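# Usage sketch (assumes a PredictConfig `config`; tower ids are illustrative):
#
#   pred = MultiTowerOfflinePredictor(config, towers=[0, 1])
#   outputs = pred(*dp)               # runs on the first tower
#   p1 = pred.get_predictor(1)        # the predictor bound to GPU 1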
class DataParallelOfflinePredictor(OnlinePredictor):
"""
A data-parallel predictor. It builds one predictor that utilizes all GPUs.
Note that it doesn't split/concat inputs/outputs automatically.
Instead, its inputs are:
``[input[0] in tower[0], input[1] in tower[0], ..., input[0] in tower[1], input[1] in tower[1], ...]``
Similar for the outputs.
"""
def __init__(self, config, towers):
"""
Args:
config (PredictConfig): the config to use.
towers: a list of relative GPU id.
"""
self.graph = config._maybe_create_graph()
with self.graph.as_default():
input_tensors = []
output_tensors = []
for idx, t in enumerate(towers):
tower_name = 'tower' + str(t)
new_sig = [tf.TensorSpec(dtype=p.dtype, shape=p.shape, name=tower_name + '_' + p.name)
for p in config.input_signature]
input = PlaceholderInput()
input.setup(new_sig)
with tf.variable_scope(tf.get_variable_scope(), reuse=idx > 0), \
tf.device('/gpu:{}'.format(t)), \
PredictTowerContext(tower_name):
config.tower_func(*input.get_input_tensors())
h = config.tower_func.towers[-1]
input_tensors.extend(h.get_tensors(config.input_names))
output_tensors.extend(h.get_tensors(config.output_names))
config.session_init._setup_graph()
sess = config.session_creator.create_session()
config.session_init._run_init(sess)
super(DataParallelOfflinePredictor, self).__init__(
input_tensors, output_tensors, config.return_input, sess)
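# Usage sketch: with towers=[0, 1] and a single-input signature, a call takes
# the inputs for both towers at once and returns their outputs concatenated
# (no automatic split/concat; `config`, `img0`, `img1` are assumptions):
#
#   pred = DataParallelOfflinePredictor(config, towers=[0, 1])
#   out_tower0_and_tower1 = pred(img0, img1)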
| 4,443 | 35.42623 | 116 | py |
SyNet | SyNet-master/tensorpack/tensorpack/graph_builder/training.py | # File: training.py
import copy
import pprint
import re
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
import six
import tensorflow as tf
from ..compat import tfv1
from ..tfutils.common import get_tf_version_tuple
from ..tfutils.gradproc import ScaleGradient
from ..tfutils.tower import TrainTowerContext
from ..utils import logger
from ..utils.develop import HIDE_DOC
from .utils import (
GradientPacker, LeastLoadedDeviceSetter, aggregate_grads, allreduce_grads, allreduce_grads_hierarchical,
merge_grad_list, override_to_local_variable, split_grad_list)
__all__ = ["DataParallelBuilder"]
@six.add_metaclass(ABCMeta)
class GraphBuilder(object):
@abstractmethod
def build(*args, **kwargs):
pass
@contextmanager
def _maybe_reuse_vs(reuse):
if reuse:
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
yield
else:
yield
class DataParallelBuilder(GraphBuilder):
def __init__(self, towers):
"""
Args:
towers(list[int]): list of GPU ids.
"""
if len(towers) > 1:
logger.info("[DataParallel] Training a model of {} towers.".format(len(towers)))
if not tf.test.is_built_with_cuda():
logger.error("[DataParallel] TensorFlow was not built with CUDA support!")
self.towers = towers
@staticmethod
def _check_grad_list(grad_list):
"""
Args:
grad_list: list of list of tuples, shape is Ngpu x Nvar x 2
"""
nvars = [len(k) for k in grad_list]
def basename(x):
return re.sub('tower[0-9]+/', '', x.op.name)
if len(set(nvars)) != 1:
names_per_gpu = [{basename(k[1]) for k in grad_and_vars} for grad_and_vars in grad_list]
inters = copy.copy(names_per_gpu[0])
for s in names_per_gpu:
inters &= s
for s in names_per_gpu:
s -= inters
logger.error("Unique trainable variables on towers: " + pprint.pformat(names_per_gpu))
raise ValueError("Number of gradients from each tower is different! " + str(nvars))
@staticmethod
def call_for_each_tower(
towers, func, devices=None, use_vs=None):
"""
Run `func` on all GPUs (towers) and return the results.
Args:
towers (list[int]): a list of GPU id.
func: a lambda to be called inside each tower
devices: a list of devices to be used. By default will use '/gpu:{tower}'
            use_vs (list[bool]): list of use_vs values to pass to TowerContext
Returns:
List of outputs of ``func``, evaluated on each tower.
"""
ret = []
if devices is not None:
assert len(devices) == len(towers)
if use_vs is not None:
assert len(use_vs) == len(towers)
tower_names = ['tower{}'.format(idx) for idx in range(len(towers))]
for idx, t in enumerate(towers):
device = devices[idx] if devices is not None else '/gpu:{}'.format(t)
usevs = use_vs[idx] if use_vs is not None else False
reuse = not usevs and idx > 0
with tfv1.device(device), _maybe_reuse_vs(reuse), TrainTowerContext(
tower_names[idx],
vs_name=tower_names[idx] if usevs else '',
index=idx, total=len(towers)):
if len(str(device)) < 10: # a device function doesn't have good string description
logger.info("Building graph for training tower {} on device {} ...".format(idx, device))
else:
logger.info("Building graph for training tower {} ...".format(idx))
# When use_vs is True, use LOCAL_VARIABLES,
# so these duplicated variables won't be saved by default.
with override_to_local_variable(enable=usevs):
ret.append(func())
return ret
@staticmethod
@HIDE_DOC
def build_on_towers(*args, **kwargs):
return DataParallelBuilder.call_for_each_tower(*args, **kwargs)
class SyncMultiGPUParameterServerBuilder(DataParallelBuilder):
"""
Data-parallel training in 'ParameterServer' mode.
It builds one tower on each GPU with
shared variable scope. It synchronizes the gradients computed
from each tower, averages them and applies to the shared variables.
It is an equivalent of ``--variable_update=parameter_server`` in
`tensorflow/benchmarks <https://github.com/tensorflow/benchmarks>`_.
"""
def __init__(self, towers, ps_device):
"""
Args:
towers(list[int]): list of GPU id
ps_device (str): either 'gpu' or 'cpu', where variables are stored.
"""
super(SyncMultiGPUParameterServerBuilder, self).__init__(towers)
assert ps_device in ['cpu', 'gpu']
self.ps_device = ps_device
def call_for_each_tower(self, tower_fn):
"""
Call the function `tower_fn` under :class:`TowerContext` for each tower.
Returns:
            a list containing the return values of `tower_fn` on each tower.
"""
raw_devices = ['/gpu:{}'.format(k) for k in self.towers]
if self.ps_device == 'gpu':
devices = [LeastLoadedDeviceSetter(d, raw_devices) for d in raw_devices]
else:
devices = [tf.train.replica_device_setter(
worker_device=d, ps_device='/cpu:0', ps_tasks=1) for d in raw_devices]
return DataParallelBuilder.build_on_towers(self.towers, tower_fn, devices)
def build(self, grad_list, get_opt_fn):
"""
Reduce the gradients, apply them with the optimizer,
and set self.grads to a list of (g, v), containing the averaged gradients.
Args:
grad_list ([[(grad, var), ...], ...]): #GPU lists to be reduced. Each is the gradients computed on each GPU.
get_opt_fn (-> tf.train.Optimizer): callable which returns an optimizer
Returns:
tf.Operation: the training op
"""
assert len(grad_list) == len(self.towers)
DataParallelBuilder._check_grad_list(grad_list)
# debug tower performance (without update):
# ops = [k[0] for k in grad_list[1]] + [k[0] for k in grad_list[0]]
# self.train_op = tf.group(*ops)
# return
self.grads = aggregate_grads(grad_list, colocation=True)
# grads = grad_list[0]
opt = get_opt_fn()
if self.ps_device == 'cpu':
with tf.device('/cpu:0'):
train_op = opt.apply_gradients(self.grads, name='train_op')
else:
train_op = opt.apply_gradients(self.grads, name='train_op')
return train_op
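# Usage sketch (hedged; `tower_fn` returns [(grad, var), ...] and `get_opt_fn`
# returns a tf.train.Optimizer -- both are assumptions of this example):
#
#   builder = SyncMultiGPUParameterServerBuilder([0, 1], ps_device='cpu')
#   grad_list = builder.call_for_each_tower(tower_fn)   # #GPU x #Var x 2
#   train_op = builder.build(grad_list, get_opt_fn)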
class SyncMultiGPUReplicatedBuilder(DataParallelBuilder):
"""
Data-parallel training in "replicated" mode,
where each GPU contains a replicate of the whole model.
It will build one tower on each GPU under its own variable scope.
    Each gradient update is averaged or summed across all GPUs through NCCL.
It is an equivalent of ``--variable_update=replicated`` in
`tensorflow/benchmarks <https://github.com/tensorflow/benchmarks>`_.
"""
def __init__(self, towers, average, mode):
super(SyncMultiGPUReplicatedBuilder, self).__init__(towers)
self._average = average
assert mode in ['nccl', 'cpu', 'hierarchical'], mode
self._mode = mode
if self._mode == 'hierarchical' and len(towers) != 8:
logger.warn("mode='hierarchical' require >= 8 GPUs. Fallback to mode='nccl'.")
self._mode = 'nccl'
def call_for_each_tower(self, tower_fn):
"""
Call the function `tower_fn` under :class:`TowerContext` for each tower.
Returns:
            a list containing the return values of `tower_fn` on each tower.
"""
# if tower_fn returns [(grad, var), ...], this returns #GPU x #VAR x 2
return DataParallelBuilder.build_on_towers(
self.towers,
tower_fn,
# use no variable scope for the first tower
use_vs=[False] + [True] * (len(self.towers) - 1))
def build(self, grad_list, get_opt_fn):
"""
Reduce the gradients, apply them with the optimizer,
and set self.grads to #GPU number of lists of (g, v), containing the all-reduced gradients on each device.
Args:
grad_list ([[(grad, var), ...], ...]): #GPU lists to be reduced. Each is the gradients computed on each GPU.
get_opt_fn (-> tf.train.Optimizer): callable which returns an optimizer
Returns:
(tf.Operation, tf.Operation)
1. the training op.
2. the op which sync variables from GPU 0 to other GPUs.
               It has to be run before training starts.
And you can optionally run it later to sync non-trainable variables.
"""
assert len(grad_list) == len(self.towers)
raw_devices = ['/gpu:{}'.format(k) for k in self.towers]
DataParallelBuilder._check_grad_list(grad_list)
dtypes = {x[0].dtype.base_dtype for x in grad_list[0]}
dtypes_nccl_supported = [tf.float32, tf.float64]
if get_tf_version_tuple() >= (1, 8):
dtypes_nccl_supported.append(tf.float16)
valid_for_nccl = all(k in dtypes_nccl_supported for k in dtypes)
if self._mode == 'nccl' and not valid_for_nccl:
logger.warn("Cannot use mode='nccl' because some gradients have unsupported types. Fallback to mode='cpu'")
self._mode = 'cpu'
if self._mode in ['nccl', 'hierarchical']:
all_grads, all_vars = split_grad_list(grad_list)
# use allreduce from tf-benchmarks
# from .batch_allreduce import AllReduceSpecAlgorithm
# algo = AllReduceSpecAlgorithm('nccl', list(range(8)), 0, 10)
# all_grads, warmup_ops = algo.batch_all_reduce(all_grads, 1, True, False)
# print("WARMUP OPS", warmup_ops)
if self._mode == 'nccl':
all_grads = allreduce_grads(all_grads, average=self._average) # #gpu x #param
else:
packer = GradientPacker(len(raw_devices))
succ = packer.compute_strategy(all_grads[0])
if succ:
packed_grads = packer.pack_all(all_grads, raw_devices)
packed_grads_aggr = allreduce_grads_hierarchical(
packed_grads, raw_devices, average=self._average)
all_grads = packer.unpack_all(packed_grads_aggr, raw_devices)
else:
all_grads = allreduce_grads_hierarchical(all_grads, raw_devices, average=self._average)
self.grads = merge_grad_list(all_grads, all_vars)
elif self._mode == 'cpu':
agg_grad_and_vars = aggregate_grads(
grad_list, colocation=False,
devices=['/cpu:0'], average=self._average) # #param x 2
self.grads = [] # #gpu x #param x 2
for grad_and_vars in grad_list: # grad_and_vars: #paramx2
# take v from each tower, and g from average.
self.grads.append(
[(g, v) for (_, v), (g, _) in zip(grad_and_vars, agg_grad_and_vars)])
train_ops = []
opt = get_opt_fn()
with tf.name_scope('apply_gradients'):
for idx, grad_and_vars in enumerate(self.grads):
with tf.device(raw_devices[idx]):
# apply_gradients may create variables. Make them LOCAL_VARIABLES
with override_to_local_variable(enable=idx > 0):
train_ops.append(opt.apply_gradients(
grad_and_vars, name='apply_grad_{}'.format(idx)))
train_op = tf.group(*train_ops, name='train_op')
if len(self.towers) > 1:
with tf.name_scope('sync_variables'):
post_init_op = SyncMultiGPUReplicatedBuilder.get_post_init_ops()
else:
post_init_op = None
return train_op, post_init_op
@staticmethod
def get_post_init_ops():
"""
Copy values of variables on GPU 0 to other GPUs.
"""
# literally all variables, because it's better to sync optimizer-internal variables as well
all_vars = tf.global_variables() + tf.local_variables()
var_by_name = {v.name: v for v in all_vars}
trainable_names = {x.name for x in tf.trainable_variables()}
post_init_ops = []
def log_failure(name, reason):
logger.warn("[ReplicatedTrainer] Do not know how to sync variable '{}' across GPUs. "
"Reason: {} ".format(name, reason))
assert name not in trainable_names, \
"The aforementioned variable is trainable, so this is probably a fatal error."
logger.warn(
"[ReplicatedTrainer] This variable is non-trainable. "
"Ignore this warning if you know it's OK to leave it out-of-sync.")
for v in all_vars:
if not v.name.startswith('tower'):
continue
if v.name.startswith('tower0'):
# in this trainer, the master name doesn't have the towerx/ prefix
log_failure(v.name, "Name should not have prefix 'tower0' in this trainer!")
continue # TODO some vars (EMA) may still startswith tower0
split_name = v.name.split('/')
prefix = split_name[0]
realname = '/'.join(split_name[1:])
if prefix in realname:
log_failure(v.name, "Prefix {} appears multiple times in its name!".format(prefix))
continue
copy_from = var_by_name.get(realname)
if copy_from is not None:
post_init_ops.append(v.assign(copy_from.read_value()))
else:
log_failure(v.name, "Cannot find {} in the graph!".format(realname))
logger.info(
"'sync_variables_from_main_tower' includes {} operations.".format(len(post_init_ops)))
return tf.group(*post_init_ops, name='sync_variables_from_main_tower')
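# Usage sketch (same assumed `tower_fn` / `get_opt_fn` as the builders above);
# the returned post_init_op must be run once before training starts:
#
#   builder = SyncMultiGPUReplicatedBuilder([0, 1], average=True, mode='nccl')
#   grad_list = builder.call_for_each_tower(tower_fn)
#   train_op, post_init_op = builder.build(grad_list, get_opt_fn)
#   ...  # after the session is created:
#   sess.run(post_init_op)            # sync variables from GPU 0 to the others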
class AsyncMultiGPUBuilder(DataParallelBuilder):
"""
Data-parallel training with async update.
It builds one tower on each GPU with shared variable scope.
Every tower computes the gradients and independently applies them to the
variables, without synchronizing and averaging across towers.
"""
def __init__(self, towers, scale_gradient=True):
"""
Args:
towers(list[int]): list of GPU ids.
scale_gradient (bool): if True, will scale each gradient by ``1.0/nr_gpu``.
"""
super(AsyncMultiGPUBuilder, self).__init__(towers)
self._scale_gradient = scale_gradient
def call_for_each_tower(self, tower_fn):
"""
Call the function `tower_fn` under :class:`TowerContext` for each tower.
Returns:
            a list containing the return values of `tower_fn` on each tower.
"""
ps_device = 'cpu' if len(self.towers) >= 4 else 'gpu'
raw_devices = ['/gpu:{}'.format(k) for k in self.towers]
if ps_device == 'gpu':
devices = [LeastLoadedDeviceSetter(d, raw_devices) for d in raw_devices]
else:
devices = [tf.train.replica_device_setter(
worker_device=d, ps_device='/cpu:0', ps_tasks=1) for d in raw_devices]
return DataParallelBuilder.build_on_towers(self.towers, tower_fn, devices)
def build(self, grad_list, get_opt_fn):
"""
Args:
grad_list ([[(grad, var), ...], ...]): #GPU lists to be reduced. Each is the gradients computed on each GPU.
get_opt_fn (-> tf.train.Optimizer): callable which returns an optimizer
Returns:
tf.Operation: the training op
"""
assert len(grad_list) == len(self.towers)
DataParallelBuilder._check_grad_list(grad_list)
if self._scale_gradient and len(self.towers) > 1:
# pretend to average the grads, in order to make async and
# sync have consistent effective learning rate
gradproc = ScaleGradient(('.*', 1.0 / len(self.towers)), verbose=False)
grad_list = [gradproc.process(gv) for gv in grad_list]
# Ngpu x Nvar x 2
train_ops = []
opt = get_opt_fn()
with tf.name_scope('async_apply_gradients'):
for i, grad_and_vars in enumerate(zip(*grad_list)):
# Ngpu x 2
v = grad_and_vars[0][1]
with tf.device(v.device):
# will call apply_gradients (therefore gradproc) multiple times
train_ops.append(opt.apply_gradients(
grad_and_vars, name='apply_grad_{}'.format(i)))
return tf.group(*train_ops, name='train_op')
| 17,248 | 40.167064 | 120 | py |
SyNet | SyNet-master/tensorpack/tensorpack/graph_builder/utils.py | # File: utils.py
import operator
from contextlib import contextmanager
import tensorflow as tf
from ..compat import tfv1
from ..tfutils.common import get_tf_version_tuple
from ..tfutils.scope_utils import cached_name_scope, under_name_scope
from ..tfutils.varreplace import custom_getter_scope
from ..utils import logger
from ..utils.argtools import call_only_once
__all__ = ["LeastLoadedDeviceSetter", "allreduce_grads", "aggregate_grads"]
"""
Some utilities for building the graph.
"""
def _replace_global_by_local(kwargs):
if 'collections' in kwargs:
collections = kwargs['collections']
if not collections:
collections = {tf.GraphKeys.GLOBAL_VARIABLES}
else:
collections = set(collections.copy())
collections.remove(tf.GraphKeys.GLOBAL_VARIABLES)
collections.add(tf.GraphKeys.LOCAL_VARIABLES)
kwargs['collections'] = list(collections)
@contextmanager
def override_to_local_variable(enable=True):
"""
Returns:
a context where all variables will be created as local.
"""
if enable:
def custom_getter(getter, name, *args, **kwargs):
_replace_global_by_local(kwargs)
return getter(name, *args, **kwargs)
with custom_getter_scope(custom_getter):
yield
else:
yield
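# Usage sketch: variables created inside the context go to LOCAL_VARIABLES
# instead of GLOBAL_VARIABLES, so they are not saved by default:
#
#   with override_to_local_variable():
#       v = tfv1.get_variable('local_only_var', shape=[])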
class LeastLoadedDeviceSetter(object):
"""
Helper class to assign variables on the least loaded ps-device.
Usage:
.. code-block:: python
with tf.device(LeastLoadedDeviceSetter(...)):
...
"""
def __init__(self, worker_device, ps_devices):
"""
Args:
worker_device: the device to use for compute ops.
            ps_devices: a list of devices to use for Variable ops.
"""
self.ps_devices = ps_devices
self.worker_device = worker_device
self.ps_sizes = [0] * len(self.ps_devices)
def __call__(self, op):
# from tensorflow.python.training.device_util import canonicalize
# from tensorflow.python.distribute.device_util import canonicalize
def canonicalize(name): # tensorflow/tensorflow#11484
return tfv1.DeviceSpec.from_string(name).to_string()
if op.device:
return op.device
if op.type not in ['Variable', 'VariableV2']:
return canonicalize(self.worker_device)
device_index, _ = min(enumerate(
self.ps_sizes), key=operator.itemgetter(1))
device_name = self.ps_devices[device_index]
var_size = op.outputs[0].get_shape().num_elements()
if var_size is None:
logger.warn("[LeastLoadedDeviceSetter] Shape of variable {} is not fully defined!".format(op.name))
var_size = 0
self.ps_sizes[device_index] += var_size
return canonicalize(device_name)
def __str__(self):
return "LeastLoadedDeviceSetter-{}".format(self.worker_device)
def split_grad_list(grad_list):
"""
Args:
grad_list: K x N x 2
Returns:
K x N: gradients
K x N: variables
"""
g = []
v = []
for tower in grad_list:
g.append([x[0] for x in tower])
v.append([x[1] for x in tower])
return g, v
def merge_grad_list(all_grads, all_vars):
"""
Args:
all_grads (K x N): gradients
all_vars(K x N): variables
Return:
K x N x 2: list of list of (grad, var) pairs
"""
return [list(zip(gs, vs)) for gs, vs in zip(all_grads, all_vars)]
@under_name_scope('AllReduceGrads')
def allreduce_grads(all_grads, average):
"""
All-reduce average the gradients among K devices. Results are broadcasted to all devices.
Args:
all_grads (K x N): List of list of gradients. N is the number of variables.
average (bool): average gradients or not.
Returns:
K x N: same as input, but each grad is replaced by the average over K devices.
"""
if get_tf_version_tuple() <= (1, 12):
from tensorflow.contrib import nccl # deprecated
else:
from tensorflow.python.ops import nccl_ops as nccl
nr_tower = len(all_grads)
if nr_tower == 1:
return all_grads
new_all_grads = [] # N x K
for grads in zip(*all_grads):
summed = nccl.all_sum(grads)
grads_for_devices = [] # K
for g in summed:
with tf.device(g.device):
# tensorflow/benchmarks didn't average gradients
if average:
g = tf.multiply(g, 1.0 / nr_tower)
grads_for_devices.append(g)
new_all_grads.append(grads_for_devices)
# transpose to K x N
ret = list(zip(*new_all_grads))
return ret
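# Sketch of the typical pipeline (as used by the replicated builder): strip the
# variables, all-reduce the gradients, then zip them back together:
#
#   all_grads, all_vars = split_grad_list(grad_list)        # each: K x N
#   all_grads = allreduce_grads(all_grads, average=True)    # NCCL all-reduce
#   grad_list = merge_grad_list(all_grads, all_vars)        # K x N x 2 again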
@under_name_scope('AllReduceGradsHierarchical')
def allreduce_grads_hierarchical(all_grads, devices, average=False):
"""
Hierarchical allreduce for DGX-1 system.
Args:
all_grads (K x N): List of list of gradients. N is the number of variables.
devices ([str]): K str for the K devices.
average (bool): average gradients or not.
Returns:
(K x N): same as input, but each grad is replaced by the average over K lists.
"""
num_gpu = len(devices)
assert num_gpu == 8, num_gpu
assert len(all_grads) == num_gpu, len(all_grads)
group_size = num_gpu // 2
agg_all_grads = [] # N x K
for varid, grads in enumerate(zip(*all_grads)):
# grads: K gradients
g0_main_gpu = varid % num_gpu
g1_main_gpu = (g0_main_gpu + group_size) % num_gpu
g0_start = 0 if g0_main_gpu < group_size else group_size
g1_start = 0 if g1_main_gpu < group_size else group_size
assert g0_start != g1_start
g0_grads = grads[g0_start: g0_start + group_size]
g1_grads = grads[g1_start: g1_start + group_size]
with tf.device(devices[g0_main_gpu]):
g0_agg = tf.add_n(g0_grads, name='group0_agg')
with tf.device(devices[g1_main_gpu]):
g1_agg = tf.add_n(g1_grads, name='group1_agg')
g1_total_agg = tf.add(g0_agg, g1_agg, name='group1_total_agg')
with tf.device(devices[g0_main_gpu]):
g0_total_agg = tf.identity(g1_total_agg, name='group0_total_agg')
agg_grads = [] # K aggregated grads
for k in range(num_gpu):
if (k < group_size) == (g0_main_gpu < group_size):
main_gpu = g0_total_agg
else:
main_gpu = g1_total_agg
with tf.device(devices[k]):
if not average:
device_total_agg = tf.identity(
main_gpu, name='device{}_total_agg'.format(k))
else:
# TODO where to put average?
device_total_agg = tf.multiply(
main_gpu, 1.0 / num_gpu, name='device{}_total_agg'.format(k))
agg_grads.append(device_total_agg)
agg_all_grads.append(agg_grads)
# transpose
agg_all_grads = list(zip(*agg_all_grads)) # K x Nvar
return agg_all_grads
@under_name_scope('AggregateGrads')
def aggregate_grads(all_grads,
colocation=False,
devices=None,
average=True):
"""
Average the gradients.
Args:
all_grads (K x N x 2): A list of K lists. Each of the list is a list of N (grad, var) tuples.
The variables have to be the same across the K lists.
colocation (bool): colocate gradient averaging on the device of the variable.
devices (list[str]): assign the averaging to these device in
round-robin. Cannot be used together with ``colocation``.
average (bool): do average or sum
Returns:
(N x 2): A list of N (grad, var) tuples, where grad is averaged or summed over K.
"""
assert not (devices is not None and colocation)
if devices is not None:
assert isinstance(devices, list), devices
nr_tower = len(all_grads)
if nr_tower == 1:
return all_grads[0]
def aggregate(grads):
if average:
return tf.multiply(tf.add_n(grads), 1.0 / nr_tower)
else:
return tf.add_n(grads)
ret = []
for idx, grad_and_vars in enumerate(zip(*all_grads)):
# Ngpu * 2
v = grad_and_vars[0][1]
grads = [g for (g, _) in grad_and_vars]
if colocation:
with tf.device(v.device): # colocate summed grad with var
grad = aggregate(grads)
elif devices is None:
grad = aggregate(grads)
else:
dev = devices[idx % len(devices)]
with tf.device(dev):
grad = aggregate(grads)
ret.append((grad, v))
return ret
average_grads = aggregate_grads
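# Usage sketch (`grad_list` is #GPU x #Var x 2; `opt` is an assumed optimizer):
#
#   grads_and_vars = aggregate_grads(grad_list, colocation=True)   # #Var x 2
#   train_op = opt.apply_gradients(grads_and_vars)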
class OverrideCachingDevice(object):
"""Variable getter which caches variables on the least loaded device.
Variables smaller than a certain threshold are cached on a single specific
device, as specified in the constructor. All other variables are load balanced
across a pool of devices, by caching each variable on the least loaded device.
"""
def __init__(self, devices, device_for_small_variables,
small_variable_size_threshold):
self.devices = devices
self.sizes = [0] * len(self.devices)
self.device_for_small_variables = device_for_small_variables
self.small_variable_size_threshold = small_variable_size_threshold
def __call__(self, getter, *args, **kwargs):
size = tf.TensorShape(kwargs['shape']).num_elements()
if size is None or not kwargs.get('trainable', True):
# TODO a lot of vars won't be saved then
_replace_global_by_local(kwargs)
return getter(*args, **kwargs)
if size < self.small_variable_size_threshold:
device_name = self.device_for_small_variables
else:
device_index, _ = min(enumerate(self.sizes), key=operator.itemgetter(1))
device_name = self.devices[device_index]
self.sizes[device_index] += size
kwargs['caching_device'] = device_name
var = getter(*args, **kwargs)
return var
class GradientPacker(object):
"""
Concat gradients together to optimize transfer.
"""
def __init__(self, num_split=8):
self._num_split = num_split
@call_only_once
def compute_strategy(self, grads):
"""
Returns:
bool - False if grads cannot be packed due to various reasons.
"""
for g in grads:
assert g.shape.is_fully_defined(), "Shape of {} is {}!".format(g.name, g.shape)
self._shapes = [g.shape for g in grads]
self._sizes = [g.shape.num_elements() for g in grads]
self._total_size = sum(self._sizes)
if self._total_size / self._num_split < 1024:
logger.info("Skip GradientPacker due to too few gradients.")
return False
# should have the same dtype
dtypes = {g.dtype for g in grads}
if len(dtypes) != 1:
logger.info("Skip GradientPacker due to inconsistent gradient types.")
return False
self._grad_dtype = grads[0].dtype
split_size = self._total_size // self._num_split
split_size_last = self._total_size - split_size * (self._num_split - 1)
self._split_sizes = [split_size] * (self._num_split - 1) + [split_size_last]
logger.info(
"Will pack {} gradients of total dimension={} into {} splits.".format(
len(self._sizes), self._total_size, self._num_split))
return True
def pack(self, grads):
"""
Args:
grads (list): list of gradient tensors
Returns:
packed list of gradient tensors to be aggregated.
"""
for i, g in enumerate(grads):
assert g.shape == self._shapes[i]
with cached_name_scope("GradientPacker", top_level=False):
concat_grads = tf.concat([tf.reshape(g, [-1]) for g in grads], 0, name='concatenated_grads')
# concat_grads = tf.cast(concat_grads, tf.float16)
grad_packs = tf.split(concat_grads, self._split_sizes)
return grad_packs
def unpack(self, grad_packs):
with cached_name_scope("GradientPacker", top_level=False):
concat_grads = tf.concat(grad_packs, 0, name='concatenated_packs')
# concat_grads = tf.cast(concat_grads, self._grad_dtype)
flattened_grads = tf.split(concat_grads, self._sizes)
grads = [tf.reshape(g, shape) for g, shape in zip(flattened_grads, self._shapes)]
return grads
def pack_all(self, all_grads, devices):
"""
Args:
all_grads: K x N, K lists of gradients to be packed
"""
ret = [] # #GPU x #split
for dev, grads in zip(devices, all_grads):
with tf.device(dev):
ret.append(self.pack(grads))
return ret
def unpack_all(self, all_packed, devices):
"""
Args:
all_packed: K lists of packed gradients.
"""
all_grads = [] # #GPU x #Var
for dev, packed_grads_single_device in zip(devices, all_packed):
with tf.device(dev):
all_grads.append(self.unpack(packed_grads_single_device))
return all_grads
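# Round-trip sketch, mirroring how the replicated builder uses the packer
# (assumes fully-defined shapes and a single dtype across all gradients):
#
#   packer = GradientPacker(num_split=len(devices))
#   if packer.compute_strategy(all_grads[0]):
#       packed = packer.pack_all(all_grads, devices)             # K x num_split
#       packed = allreduce_grads_hierarchical(packed, devices)
#       all_grads = packer.unpack_all(packed, devices)           # K x N again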
| 13,752 | 32.874384 | 145 | py |
SyNet | SyNet-master/tensorpack/tensorpack/graph_builder/distributed.py | # File: distributed.py
import re
import tensorflow as tf
from ..tfutils.common import get_global_step_var, get_op_tensor_name
from ..utils import logger
from ..utils.argtools import memoized
from .training import DataParallelBuilder, GraphBuilder
from .utils import OverrideCachingDevice, aggregate_grads, override_to_local_variable
__all__ = []
class DistributedBuilderBase(GraphBuilder):
_sync_queue_counter = 0
def __init__(self, server):
self.server = server
server_def = server.server_def
self.cluster = tf.train.ClusterSpec(server_def.cluster)
self.task_index = server_def.task_index
self.num_ps = self.cluster.num_tasks('ps')
self.num_worker = self.cluster.num_tasks('worker')
def _add_sync_queues_and_barrier(self, name, dependencies):
"""Adds ops to enqueue on all worker queues.
Args:
            name: prefix for the shared_name of ops.
            dependencies: ops that the enqueue ops should depend on.
Returns:
an op that should be used as control dependency before starting next step.
"""
self._sync_queue_counter += 1
with tf.device(self.sync_queue_devices[self._sync_queue_counter % len(self.sync_queue_devices)]):
sync_queues = [
tf.FIFOQueue(self.num_worker, [tf.bool], shapes=[[]],
shared_name='%s%s' % (name, i))
for i in range(self.num_worker)]
queue_ops = []
# For each other worker, add an entry in a queue, signaling that it can finish this step.
token = tf.constant(False)
with tf.control_dependencies(dependencies):
for i, q in enumerate(sync_queues):
if i != self.task_index:
queue_ops.append(q.enqueue(token))
# Drain tokens off queue for this worker, one for each other worker.
queue_ops.append(
sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1))
return tf.group(*queue_ops, name=name)
class DistributedParameterServerBuilder(DataParallelBuilder, DistributedBuilderBase):
"""
Distributed parameter server training.
    A single copy of the parameters is scattered across the PS.
Gradients across GPUs are averaged within the worker, and applied to PS.
Each worker also caches the variables for reading.
It is an equivalent of ``--variable_update=parameter_server`` in
`tensorflow/benchmarks <https://github.com/tensorflow/benchmarks>`_.
However this implementation hasn't been well tested.
It probably still has issues in model saving, etc.
Also, TensorFlow team is not actively maintaining distributed training features.
Check :class:`HorovodTrainer` and
`ResNet-Horovod <https://github.com/tensorpack/benchmarks/tree/master/ResNet-Horovod>`_
for better distributed training support.
Note:
1. Gradients are not averaged across workers, but applied to PS variables
directly (either with or without locking depending on the optimizer).
"""
def __init__(self, towers, server, caching_device):
"""
Args:
towers (list[int]): list of GPU ids.
server (tf.train.Server): the server with ps and workers.
job_name must be 'worker'.
caching_device (str): either 'cpu' or 'gpu'
"""
DataParallelBuilder.__init__(self, towers)
DistributedBuilderBase.__init__(self, server)
assert caching_device in ['cpu', 'gpu'], caching_device
self.caching_device = caching_device
self.is_chief = (self.task_index == 0)
worker_prefix = '/job:worker/task:%s' % self.task_index
self.param_server_device = tf.train.replica_device_setter(
worker_device=worker_prefix + '/cpu:0', cluster=self.cluster)
self.cpu_device = '%s/cpu:0' % worker_prefix
self.raw_devices = ['{}/gpu:{}'.format(worker_prefix, k) for k in self.towers]
self.sync_queue_devices = ['/job:ps/task:%s/cpu:0' % i for i in range(self.num_ps)]
def build(self, get_grad_fn, get_opt_fn):
ps_strategy = tf.contrib.training.GreedyLoadBalancingStrategy(
self.num_ps, tf.contrib.training.byte_size_load_fn)
devices = [
tf.train.replica_device_setter(
worker_device=d,
cluster=self.cluster,
ps_strategy=ps_strategy) for d in self.raw_devices]
if self.caching_device == 'gpu':
caching_devices = self.raw_devices
else:
caching_devices = [self.cpu_device]
custom_getter = OverrideCachingDevice(
caching_devices, self.cpu_device, 1024 * 64)
with tf.variable_scope(tf.get_variable_scope(), custom_getter=custom_getter):
grad_list = DataParallelBuilder.build_on_towers(self.towers, get_grad_fn, devices)
DataParallelBuilder._check_grad_list(grad_list)
with tf.device(self.param_server_device):
grads = aggregate_grads(grad_list, colocation=False)
opt = get_opt_fn()
train_op = opt.apply_gradients(grads, name='train_op')
train_op = self._add_sync_queues_and_barrier('all_workers_sync_barrier', [train_op])
return train_op
class DistributedReplicatedBuilder(DataParallelBuilder, DistributedBuilderBase):
"""
Distributed replicated training.
Each worker process builds the same model on one or more GPUs.
Gradients across GPUs are averaged within the worker,
and get synchronously applied to the global copy of variables located on PS.
Then each worker copy the latest variables from PS back to local.
It is an equivalent of ``--variable_update=distributed_replicated`` in
`tensorflow/benchmarks <https://github.com/tensorflow/benchmarks>`_.
Note that the performance of this trainer is still not satisfactory,
and TensorFlow team is not actively maintaining distributed training features.
Check :class:`HorovodTrainer` and
`ResNet-Horovod <https://github.com/tensorpack/benchmarks/tree/master/ResNet-Horovod>`_
for better distributed training support.
Note:
1. Gradients are not averaged across workers, but applied to PS variables
directly (either with or without locking depending on the optimizer).
2. Some details about collections: all variables created inside tower
will become local variables,
and a clone will be made in global variables for all trainable/model variables.
Example:
.. code-block:: python
# Create the server object like this:
hosts = ['host1.com', 'host2.com']
cluster_spec = tf.train.ClusterSpec({
'ps': [h + ':2222' for h in hosts],
'worker': [h + ':2223' for h in hosts]
})
server = tf.train.Server(
cluster_spec, job_name=args.job, task_index=args.task,
config=get_default_sess_config())
# initialize trainer with this server object
.. code-block:: none
# Start training like this:
(host1)$ ./train.py --job worker --task 0
(host1)$ CUDA_VISIBLE_DEVICES= ./train.py --job ps --task 0
(host2)$ ./train.py --job worker --task 1
(host2)$ CUDA_VISIBLE_DEVICES= ./train.py --job ps --task 1
"""
def __init__(self, towers, server):
"""
Args:
towers (list[int]): list of GPU ids.
server (tf.train.Server): the server with ps and workers.
job_name must be 'worker'.
"""
DataParallelBuilder.__init__(self, towers)
DistributedBuilderBase.__init__(self, server)
self.is_chief = (self.task_index == 0)
worker_prefix = '/job:worker/task:%s' % self.task_index
self.param_server_device = tf.train.replica_device_setter(
worker_device=worker_prefix + '/cpu:0', cluster=self.cluster)
self.nr_gpu = len(self.towers)
self.cpu_device = '%s/cpu:0' % worker_prefix
self.raw_devices = ['%s/gpu:%i' % (worker_prefix, i) for i in towers]
# Device for queues for managing synchronization between servers
self.sync_queue_devices = ['/job:ps/task:%s/cpu:0' % i for i in range(self.num_ps)]
@staticmethod
def _apply_shadow_vars(avg_grads):
"""
Create shadow variables on PS, and replace variables in avg_grads
by these shadow variables.
Args:
avg_grads: list of (grad, var) tuples
"""
ps_var_grads = []
for grad, var in avg_grads:
assert var.name.startswith('tower'), var.name
my_name = '/'.join(var.name.split('/')[1:])
my_name = get_op_tensor_name(my_name)[0]
new_v = tf.get_variable(my_name, dtype=var.dtype.base_dtype,
initializer=var.initial_value,
trainable=True)
# (g, v) to be applied, where v is global (ps vars)
ps_var_grads.append((grad, new_v))
return ps_var_grads
@staticmethod
def _shadow_model_variables(shadow_vars):
"""
Create shadow vars for model_variables as well, and add to the list of ``shadow_vars``.
Returns:
list of (shadow_model_var, local_model_var) used for syncing.
"""
G = tf.get_default_graph()
curr_shadow_vars = {v.name for v in shadow_vars}
model_vars = tf.model_variables()
shadow_model_vars = []
for v in model_vars:
assert v.name.startswith('tower'), "Found some MODEL_VARIABLES created outside of the tower function!"
stripped_op_name, stripped_var_name = get_op_tensor_name(re.sub('^tower[0-9]+/', '', v.name))
if stripped_op_name in curr_shadow_vars:
continue
try:
G.get_tensor_by_name(stripped_var_name)
logger.warn("Model Variable {} also appears in other collections.".format(stripped_var_name))
continue
except KeyError:
pass
new_v = tf.get_variable(stripped_op_name, dtype=v.dtype.base_dtype,
initializer=v.initial_value,
trainable=False)
curr_shadow_vars.add(stripped_op_name) # avoid duplicated shadow_model_vars
shadow_vars.append(new_v)
shadow_model_vars.append((new_v, v)) # only need to sync model_var from one tower
return shadow_model_vars
def build(self, get_grad_fn, get_opt_fn):
"""
Args:
get_grad_fn (-> [(grad, var)]):
get_opt_fn (-> tf.train.Optimizer): callable which returns an optimizer
Returns:
(tf.Operation, tf.Operation, tf.Operation):
1. the training op.
2. the op which sync all the local variables from PS.
This op should be run before training.
3. the op which sync all the local `MODEL_VARIABLES` from PS.
You can choose how often to run it by yourself.
"""
with override_to_local_variable():
get_global_step_var()
get_opt_fn = memoized(get_opt_fn)
# Build the optimizer first, before entering any tower.
# This makes sure that learning_rate is a global variable (what we expect)
get_opt_fn() # TODO get_opt_fn called before main graph was built
# Ngpu * Nvar * 2
grad_list = DataParallelBuilder.build_on_towers(
self.towers, get_grad_fn,
devices=self.raw_devices,
use_vs=[True] * len(self.towers)) # open vs at each tower
DataParallelBuilder._check_grad_list(grad_list)
avg_grads = aggregate_grads(
grad_list, colocation=False, devices=self.raw_devices)
with tf.device(self.param_server_device):
ps_var_grads = DistributedReplicatedBuilder._apply_shadow_vars(avg_grads)
var_update_ops = self._apply_gradients_and_copy(
get_opt_fn(), grad_list, ps_var_grads)
self._shadow_vars = [v for (__, v) in ps_var_grads]
self._shadow_model_vars = DistributedReplicatedBuilder._shadow_model_variables(self._shadow_vars)
# TODO add options to synchronize less
main_fetch = tf.group(*var_update_ops, name='main_fetches')
train_op = self._add_sync_queues_and_barrier(
'post_copy_barrier', [main_fetch])
# initial local_vars syncing
with tf.name_scope('initial_sync_variables'):
initial_sync_op = self._get_initial_sync_op()
if len(self._shadow_model_vars) and self.is_chief:
with tf.name_scope('sync_model_variables'):
model_sync_op = self._get_sync_model_vars_op()
else:
model_sync_op = None
return train_op, initial_sync_op, model_sync_op
def _apply_gradients_and_copy(self, opt, raw_grad_list, ps_var_grads):
"""
Apply averaged gradients to ps vars, and then copy the updated
variables back to each tower.
Args:
raw_grad_list: Ngpu x Nvar x 2 gradient list from all towers
ps_var_grads: Nvar x 2 (grad, ps_var)
Returns:
list of copy ops
"""
# TODO do this for variables together?
with tf.name_scope('apply_gradients'):
var_update_ops = []
for vid, (g, v) in enumerate(ps_var_grads):
# TODO do we put momentum variables into local or global?
apply_gradient_op = opt.apply_gradients([(g, v)])
barrier = self._add_sync_queues_and_barrier(
'param_update_barrier_{}'.format(vid), [apply_gradient_op])
with tf.control_dependencies([barrier]), \
tf.device(self.cpu_device):
updated_value = v.read_value()
for towerid in range(self.nr_gpu):
var_update_ops.append(
raw_grad_list[towerid][vid][1].assign(updated_value))
return var_update_ops
def _get_initial_sync_op(self):
"""
        Get the op to copy-initialize all local variables from PS.
"""
def strip_port(s):
if s.endswith(':0'):
return s[:-2]
return s
local_vars = tf.local_variables()
local_var_by_name = {strip_port(v.name): v for v in local_vars}
ops = []
nr_shadow_vars = len(self._shadow_vars)
for v in self._shadow_vars:
vname = strip_port(v.name)
for i in range(self.nr_gpu):
name = 'tower%s/%s' % (i, vname)
assert name in local_var_by_name, \
"Shadow variable {} doesn't match a corresponding local variable!".format(v.name)
copy_to = local_var_by_name[name]
# logger.info("{} -> {}".format(v.name, copy_to.name))
ops.append(copy_to.assign(v.read_value()))
return tf.group(*ops, name='sync_{}_variables_from_ps'.format(nr_shadow_vars))
def _get_sync_model_vars_op(self):
"""
Get the op to sync local model_variables to PS.
"""
ops = []
for (shadow_v, local_v) in self._shadow_model_vars:
ops.append(shadow_v.assign(local_v.read_value()))
assert len(ops)
return tf.group(*ops, name='sync_{}_model_variables_to_ps'.format(len(ops)))
| 15,716 | 41.25 | 114 | py |
SyNet | SyNet-master/tensorpack/tensorpack/graph_builder/model_desc.py | # File: model_desc.py
from ..train.model_desc import ModelDesc, ModelDescBase # kept for BC # noqa
__all__ = []
| 141 | 14.777778 | 77 | py |
pytorch-playground | pytorch-playground-master/setup.py | from setuptools import setup, find_packages
with open("requirements.txt") as requirements_file:
REQUIREMENTS = requirements_file.readlines()
setup(
name="pytorch-playground",
version="1.0.0",
author='Aaron Chen',
author_email='aaron.xichen@gmail.com',
packages=find_packages(),
    entry_points={
'console_scripts': [
'quantize=quantize:main',
]
},
install_requires=REQUIREMENTS,
)
| 447 | 21.4 | 51 | py |
pytorch-playground | pytorch-playground-master/quantize.py | import argparse
from utee import misc, quant, selector
import torch
import torch.backends.cudnn as cudnn
cudnn.benchmark =True
from collections import OrderedDict
def main():
parser = argparse.ArgumentParser(description='PyTorch SVHN Example')
parser.add_argument('--type', default='cifar10', help='|'.join(selector.known_models))
parser.add_argument('--quant_method', default='linear', help='linear|minmax|log|tanh')
    parser.add_argument('--batch_size', type=int, default=100, help='input batch size for training (default: 100)')
parser.add_argument('--gpu', default=None, help='index of gpus to use')
parser.add_argument('--ngpu', type=int, default=8, help='number of gpus to use')
    parser.add_argument('--seed', type=int, default=117, help='random seed (default: 117)')
parser.add_argument('--model_root', default='~/.torch/models/', help='folder to save the model')
parser.add_argument('--data_root', default='/data/public_dataset/pytorch/', help='folder to save the model')
parser.add_argument('--logdir', default='log/default', help='folder to save to the log')
parser.add_argument('--input_size', type=int, default=224, help='input size of image')
parser.add_argument('--n_sample', type=int, default=20, help='number of samples to infer the scaling factor')
parser.add_argument('--param_bits', type=int, default=8, help='bit-width for parameters')
parser.add_argument('--bn_bits', type=int, default=32, help='bit-width for running mean and std')
parser.add_argument('--fwd_bits', type=int, default=8, help='bit-width for layer output')
parser.add_argument('--overflow_rate', type=float, default=0.0, help='overflow rate')
args = parser.parse_args()
args.gpu = misc.auto_select_gpu(utility_bound=0, num_gpu=args.ngpu, selected_gpus=args.gpu)
args.ngpu = len(args.gpu)
misc.ensure_dir(args.logdir)
args.model_root = misc.expand_user(args.model_root)
args.data_root = misc.expand_user(args.data_root)
args.input_size = 299 if 'inception' in args.type else args.input_size
assert args.quant_method in ['linear', 'minmax', 'log', 'tanh']
print("=================FLAGS==================")
for k, v in args.__dict__.items():
print('{}: {}'.format(k, v))
print("========================================")
assert torch.cuda.is_available(), 'no cuda'
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# load model and dataset fetcher
model_raw, ds_fetcher, is_imagenet = selector.select(args.type, model_root=args.model_root)
args.ngpu = args.ngpu if is_imagenet else 1
# quantize parameters
if args.param_bits < 32:
state_dict = model_raw.state_dict()
state_dict_quant = OrderedDict()
sf_dict = OrderedDict()
for k, v in state_dict.items():
if 'running' in k:
                if args.bn_bits >= 32:
print("Ignoring {}".format(k))
state_dict_quant[k] = v
continue
else:
bits = args.bn_bits
else:
bits = args.param_bits
if args.quant_method == 'linear':
sf = bits - 1. - quant.compute_integral_part(v, overflow_rate=args.overflow_rate)
v_quant = quant.linear_quantize(v, sf, bits=bits)
elif args.quant_method == 'log':
v_quant = quant.log_minmax_quantize(v, bits=bits)
elif args.quant_method == 'minmax':
v_quant = quant.min_max_quantize(v, bits=bits)
else:
v_quant = quant.tanh_quantize(v, bits=bits)
state_dict_quant[k] = v_quant
print(k, bits)
model_raw.load_state_dict(state_dict_quant)
# quantize forward activation
if args.fwd_bits < 32:
model_raw = quant.duplicate_model_with_quant(model_raw, bits=args.fwd_bits, overflow_rate=args.overflow_rate,
counter=args.n_sample, type=args.quant_method)
print(model_raw)
val_ds_tmp = ds_fetcher(10, data_root=args.data_root, train=False, input_size=args.input_size)
misc.eval_model(model_raw, val_ds_tmp, ngpu=1, n_sample=args.n_sample, is_imagenet=is_imagenet)
# eval model
val_ds = ds_fetcher(args.batch_size, data_root=args.data_root, train=False, input_size=args.input_size)
acc1, acc5 = misc.eval_model(model_raw, val_ds, ngpu=args.ngpu, is_imagenet=is_imagenet)
# print sf
print(model_raw)
res_str = "type={}, quant_method={}, param_bits={}, bn_bits={}, fwd_bits={}, overflow_rate={}, acc1={:.4f}, acc5={:.4f}".format(
args.type, args.quant_method, args.param_bits, args.bn_bits, args.fwd_bits, args.overflow_rate, acc1, acc5)
print(res_str)
with open('acc1_acc5.txt', 'a') as f:
f.write(res_str + '\n')
if __name__ == '__main__':
main()
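# Example invocation (paths and values are illustrative):
#   python quantize.py --type cifar10 --quant_method linear \
#       --param_bits 8 --bn_bits 32 --fwd_bits 8 --overflow_rate 0.0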
| 4,928 | 48.29 | 132 | py |
pytorch-playground | pytorch-playground-master/svhn/model.py | import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import os
from collections import OrderedDict
from utee import misc
print = misc.logger.info
model_urls = {
'svhn': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/svhn-f564f3d8.pth',
}
class SVHN(nn.Module):
def __init__(self, features, n_channel, num_classes):
super(SVHN, self).__init__()
assert isinstance(features, nn.Sequential), type(features)
self.features = features
self.classifier = nn.Sequential(
nn.Linear(n_channel, num_classes)
)
print(self.features)
print(self.classifier)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for i, v in enumerate(cfg):
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
padding = v[1] if isinstance(v, tuple) else 1
out_channels = v[0] if isinstance(v, tuple) else v
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=padding)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(out_channels, affine=False), nn.ReLU(), nn.Dropout(0.3)]
else:
layers += [conv2d, nn.ReLU(), nn.Dropout(0.3)]
in_channels = out_channels
return nn.Sequential(*layers)
def svhn(n_channel, pretrained=None):
cfg = [n_channel, n_channel, 'M', 2*n_channel, 2*n_channel, 'M', 4*n_channel, 4*n_channel, 'M', (8*n_channel, 0), 'M']
layers = make_layers(cfg, batch_norm=True)
model = SVHN(layers, n_channel=8*n_channel, num_classes=10)
if pretrained is not None:
m = model_zoo.load_url(model_urls['svhn'])
state_dict = m.state_dict() if isinstance(m, nn.Module) else m
assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
model.load_state_dict(state_dict)
return model
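# Usage sketch (input is a 32x32 RGB batch; weights download on first use):
#
#   net = svhn(n_channel=32, pretrained=True)
#   logits = net(torch.randn(1, 3, 32, 32))   # shape: (1, 10)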
| 2,056 | 33.864407 | 122 | py |
pytorch-playground | pytorch-playground-master/svhn/dataset.py | import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import os
def get(batch_size, data_root='/tmp/public_dataset/pytorch', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'svhn-data'))
num_workers = kwargs.setdefault('num_workers', 1)
kwargs.pop('input_size', None)
print("Building SVHN data loader with {} workers".format(num_workers))
def target_transform(target):
return int(target) - 1
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.SVHN(
root=data_root, split='train', download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]),
target_transform=target_transform,
),
batch_size=batch_size, shuffle=True, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.SVHN(
root=data_root, split='test', download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]),
target_transform=target_transform
),
batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
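# Usage sketch (illustrative, mirroring the __main__ block in stl10/dataset.py):
if __name__ == '__main__':
    train_ds, test_ds = get(200, num_workers=1)
    data, target = next(iter(train_ds))
    print(data.shape, target.shape) # expected: [200, 3, 32, 32] and [200]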
| 1,565 | 34.590909 | 93 | py |
pytorch-playground | pytorch-playground-master/svhn/train.py | import argparse
import os
import time
from utee import misc
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import dataset
import model
from IPython import embed
parser = argparse.ArgumentParser(description='PyTorch SVHN Example')
parser.add_argument('--channel', type=int, default=32, help='first conv channel (default: 32)')
parser.add_argument('--wd', type=float, default=0.001, help='weight decay')
parser.add_argument('--batch_size', type=int, default=200, help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=150, help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: 1e-3)')
parser.add_argument('--gpu', default=None, help='index of gpus to use')
parser.add_argument('--ngpu', type=int, default=2, help='number of gpus to use')
parser.add_argument('--seed', type=int, default=117, help='random seed (default: 1)')
parser.add_argument('--log_interval', type=int, default=100, help='how many batches to wait before logging training status')
parser.add_argument('--test_interval', type=int, default=5, help='how many epochs to wait before another test')
parser.add_argument('--logdir', default='log/default', help='folder to save to the log')
parser.add_argument('--data_root', default='/tmp/public_dataset/pytorch/', help='folder to save the model')
parser.add_argument('--decreasing_lr', default='80,120', help='decreasing strategy')
args = parser.parse_args()
args.logdir = os.path.join(os.path.dirname(__file__), args.logdir)
misc.logger.init(args.logdir, 'train_log')
print = misc.logger.info
# select gpu
args.gpu = misc.auto_select_gpu(utility_bound=0, num_gpu=args.ngpu, selected_gpus=args.gpu)
args.ngpu = len(args.gpu)
# logger
misc.ensure_dir(args.logdir)
print("=================FLAGS==================")
for k, v in args.__dict__.items():
print('{}: {}'.format(k, v))
print("========================================")
# seed
args.cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# data loader and model
train_loader, test_loader = dataset.get(batch_size=args.batch_size, data_root=args.data_root, num_workers=1)
model = model.svhn(n_channel=args.channel)
model = torch.nn.DataParallel(model, device_ids= range(args.ngpu))
if args.cuda:
model.cuda()
# optimizer
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
decreasing_lr = list(map(int, args.decreasing_lr.split(',')))
print('decreasing_lr: ' + str(decreasing_lr))
best_acc, old_file = 0, None
t_begin = time.time()
try:
for epoch in range(args.epochs):
model.train()
if epoch in decreasing_lr:
optimizer.param_groups[0]['lr'] *= 0.1
for batch_idx, (data, target) in enumerate(train_loader):
indx_target = target.clone()
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0 and batch_idx > 0:
pred = output.data.max(1)[1] # get the index of the max log-probability
correct = pred.cpu().eq(indx_target).sum()
acc = correct * 1.0 / len(data)
print('Train Epoch: {} [{}/{}] Loss: {:.6f} Acc: {:.4f} lr: {:.2e}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
                    loss.item(), acc, optimizer.param_groups[0]['lr']))  # .item() replaces the deprecated loss.data[0]
elapse_time = time.time() - t_begin
speed_epoch = elapse_time / (epoch + 1)
speed_batch = speed_epoch / len(train_loader)
eta = speed_epoch * args.epochs - elapse_time
print("Elapsed {:.2f}s, {:.2f} s/epoch, {:.2f} s/batch, ets {:.2f}s".format(
elapse_time, speed_epoch, speed_batch, eta))
misc.model_snapshot(model, os.path.join(args.logdir, 'latest.pth'))
if epoch % args.test_interval == 0:
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
indx_target = target.clone()
if args.cuda:
data, target = data.cuda(), target.cuda().long().squeeze()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
                test_loss += F.cross_entropy(output, target).item()
pred = output.data.max(1)[1] # get the index of the max log-probability
correct += pred.cpu().eq(indx_target).sum()
test_loss = test_loss / len(test_loader) # average over number of mini-batch
acc = 100. * correct / len(test_loader.dataset)
print('\tTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
test_loss, correct, len(test_loader.dataset), acc))
if acc > best_acc:
new_file = os.path.join(args.logdir, 'best-{}.pth'.format(epoch))
misc.model_snapshot(model, new_file, old_file=old_file, verbose=True)
best_acc = acc
old_file = new_file
except Exception as e:
import traceback
traceback.print_exc()
finally:
print("Total Elapse: {:.2f}, Best Result: {:.3f}%".format(time.time()-t_begin, best_acc))
| 5,590 | 43.023622 | 125 | py |
pytorch-playground | pytorch-playground-master/stl10/model.py | import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import os
from utee import misc
from collections import OrderedDict
print = misc.logger.info
model_urls = {
'stl10': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/stl10-866321e9.pth',
}
class SVHN(nn.Module):
def __init__(self, features, n_channel, num_classes):
super(SVHN, self).__init__()
assert isinstance(features, nn.Sequential), type(features)
self.features = features
self.classifier = nn.Sequential(
nn.Linear(n_channel, num_classes)
)
print(self.features)
print(self.classifier)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for i, v in enumerate(cfg):
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
padding = v[1] if isinstance(v, tuple) else 1
out_channels = v[0] if isinstance(v, tuple) else v
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=padding)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(out_channels, affine=False), nn.ReLU()]
else:
layers += [conv2d, nn.ReLU()]
in_channels = out_channels
return nn.Sequential(*layers)
def stl10(n_channel, pretrained=None):
cfg = [
n_channel, 'M',
2*n_channel, 'M',
4*n_channel, 'M',
4*n_channel, 'M',
(8*n_channel, 0), (8*n_channel, 0), 'M'
]
layers = make_layers(cfg, batch_norm=True)
model = SVHN(layers, n_channel=8*n_channel, num_classes=10)
if pretrained is not None:
m = model_zoo.load_url(model_urls['stl10'])
state_dict = m.state_dict() if isinstance(m, nn.Module) else m
assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
model.load_state_dict(state_dict)
return model
| 2,071 | 30.876923 | 89 | py |
pytorch-playground | pytorch-playground-master/stl10/dataset.py | import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from IPython import embed
import os
def get(batch_size, data_root='/mnt/local0/public_dataset/pytorch/', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'stl10-data'))
num_workers = kwargs.setdefault('num_workers', 1)
kwargs.pop('input_size', None)
print("Building STL10 data loader with {} workers".format(num_workers))
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.STL10(
root=data_root, split='train', download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(96),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])),
batch_size=batch_size, shuffle=True, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.STL10(
root=data_root, split='test', download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])),
batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
if __name__ == '__main__':
train_ds, test_ds = get(200, num_workers=1)
for data, target in train_ds:
print("~~")
| 1,678 | 36.311111 | 101 | py |
pytorch-playground | pytorch-playground-master/stl10/train.py | import argparse
import os
import time
from utee import misc
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import dataset
import model
from IPython import embed
parser = argparse.ArgumentParser(description='PyTorch SVHN Example')
parser.add_argument('--channel', type=int, default=32, help='first conv channel (default: 32)')
parser.add_argument('--wd', type=float, default=0.00, help='weight decay')
parser.add_argument('--batch_size', type=int, default=200, help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=150, help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: 1e-3)')
parser.add_argument('--gpu', default=None, help='index of gpus to use')
parser.add_argument('--ngpu', type=int, default=2, help='number of gpus to use')
parser.add_argument('--seed', type=int, default=117, help='random seed (default: 1)')
parser.add_argument('--log_interval', type=int, default=20, help='how many batches to wait before logging training status')
parser.add_argument('--test_interval', type=int, default=5, help='how many epochs to wait before another test')
parser.add_argument('--logdir', default='log/default', help='folder to save to the log')
parser.add_argument('--decreasing_lr', default='80,120', help='decreasing strategy')
args = parser.parse_args()
args.logdir = os.path.join(os.path.dirname(__file__), args.logdir)
misc.logger.init(args.logdir, 'train_log')
print = misc.logger.info
# select gpu
args.gpu = misc.auto_select_gpu(utility_bound=0, num_gpu=args.ngpu, selected_gpus=args.gpu)
args.ngpu = len(args.gpu)
# logger
misc.ensure_dir(args.logdir)
print("=================FLAGS==================")
for k, v in args.__dict__.items():
print('{}: {}'.format(k, v))
print("========================================")
# seed
args.cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# data loader and model
train_loader, test_loader = dataset.get(batch_size=args.batch_size, num_workers=1)
model = model.stl10(n_channel=args.channel)
model = torch.nn.DataParallel(model, device_ids= range(args.ngpu))
if args.cuda:
model.cuda()
# optimizer
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
decreasing_lr = list(map(int, args.decreasing_lr.split(',')))
print('decreasing_lr: ' + str(decreasing_lr))
best_acc, old_file = 0, None
t_begin = time.time()
try:
# ready to go
for epoch in range(args.epochs):
model.train()
if epoch in decreasing_lr:
optimizer.param_groups[0]['lr'] *= 0.1
for batch_idx, (data, target) in enumerate(train_loader):
indx_target = target.clone()
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0 and batch_idx > 0:
pred = output.data.max(1)[1] # get the index of the max log-probability
correct = pred.cpu().eq(indx_target).sum()
acc = correct * 1.0 / len(data)
print('Train Epoch: {} [{}/{}] Loss: {:.6f} Acc: {:.4f} lr: {:.2e}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
                    loss.item(), acc, optimizer.param_groups[0]['lr']))  # .item() replaces the deprecated loss.data[0]
elapse_time = time.time() - t_begin
speed_epoch = elapse_time / (epoch + 1)
speed_batch = speed_epoch / len(train_loader)
eta = speed_epoch * args.epochs - elapse_time
print("Elapsed {:.2f}s, {:.2f} s/epoch, {:.2f} s/batch, ets {:.2f}s".format(
elapse_time, speed_epoch, speed_batch, eta))
misc.model_snapshot(model, os.path.join(args.logdir, 'latest.pth'))
if epoch % args.test_interval == 0:
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
indx_target = target.clone()
if args.cuda:
data, target = data.cuda(), target.cuda().long().squeeze()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
                test_loss += F.cross_entropy(output, target).item()
pred = output.data.max(1)[1] # get the index of the max log-probability
correct += pred.cpu().eq(indx_target).sum()
test_loss = test_loss / len(test_loader) # average over number of mini-batch
acc = 100. * correct / len(test_loader.dataset)
print('\tTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
test_loss, correct, len(test_loader.dataset), acc))
if acc > best_acc:
new_file = os.path.join(args.logdir, 'best-{}.pth'.format(epoch))
misc.model_snapshot(model, new_file, old_file=old_file, verbose=True)
best_acc = acc
old_file = new_file
except Exception as e:
import traceback
traceback.print_exc()
finally:
print("Total Elapse: {:.2f}, Best Result: {:.3f}%".format(time.time()-t_begin, best_acc))
| 5,473 | 42.102362 | 124 | py |
pytorch-playground | pytorch-playground-master/imagenet/inception.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from utee import misc
from collections import OrderedDict
__all__ = ['Inception3', 'inception_v3']
model_urls = {
'inception_v3_google': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',
}
def inception_v3(pretrained=False, model_root=None, **kwargs):
if pretrained:
if 'transform_input' not in kwargs:
kwargs['transform_input'] = True
model = Inception3(**kwargs)
misc.load_state_dict(model, model_urls['inception_v3_google'], model_root)
return model
return Inception3(**kwargs)
class Inception3(nn.Module):
def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
super(Inception3, self).__init__()
self.aux_logits = aux_logits
self.transform_input = transform_input
self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
self.Mixed_5b = InceptionA(192, pool_features=32)
self.Mixed_5c = InceptionA(256, pool_features=64)
self.Mixed_5d = InceptionA(288, pool_features=64)
self.Mixed_6a = InceptionB(288)
self.Mixed_6b = InceptionC(768, channels_7x7=128)
self.Mixed_6c = InceptionC(768, channels_7x7=160)
self.Mixed_6d = InceptionC(768, channels_7x7=160)
self.Mixed_6e = InceptionC(768, channels_7x7=192)
if aux_logits:
self.AuxLogits = InceptionAux(768, num_classes)
self.Mixed_7a = InceptionD(768)
self.Mixed_7b = InceptionE(1280)
self.Mixed_7c = InceptionE(2048)
self.group1 = nn.Sequential(
OrderedDict([
('fc', nn.Linear(2048, num_classes))
])
)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
import scipy.stats as stats
stddev = m.stddev if hasattr(m, 'stddev') else 0.1
X = stats.truncnorm(-2, 2, scale=stddev)
values = torch.Tensor(X.rvs(m.weight.data.numel()))
m.weight.data.copy_(values.reshape(m.weight.shape))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
        if self.transform_input:
            # re-normalize from (0.5, 0.5) stats to ImageNet stats, per channel
            # (index channels with x[:, c], not batch entries with x[c])
            x = x.clone()
            x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
# 299 x 299 x 3
x = self.Conv2d_1a_3x3(x)
# 149 x 149 x 32
x = self.Conv2d_2a_3x3(x)
# 147 x 147 x 32
x = self.Conv2d_2b_3x3(x)
# 147 x 147 x 64
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 73 x 73 x 64
x = self.Conv2d_3b_1x1(x)
# 73 x 73 x 80
x = self.Conv2d_4a_3x3(x)
# 71 x 71 x 192
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 35 x 35 x 192
x = self.Mixed_5b(x)
# 35 x 35 x 256
x = self.Mixed_5c(x)
# 35 x 35 x 288
x = self.Mixed_5d(x)
# 35 x 35 x 288
x = self.Mixed_6a(x)
# 17 x 17 x 768
x = self.Mixed_6b(x)
# 17 x 17 x 768
x = self.Mixed_6c(x)
# 17 x 17 x 768
x = self.Mixed_6d(x)
# 17 x 17 x 768
x = self.Mixed_6e(x)
# 17 x 17 x 768
if self.training and self.aux_logits:
aux = self.AuxLogits(x)
# 17 x 17 x 768
x = self.Mixed_7a(x)
# 8 x 8 x 1280
x = self.Mixed_7b(x)
# 8 x 8 x 2048
x = self.Mixed_7c(x)
# 8 x 8 x 2048
x = F.avg_pool2d(x, kernel_size=8)
# 1 x 1 x 2048
x = F.dropout(x, training=self.training)
# 1 x 1 x 2048
x = x.view(x.size(0), -1)
# 2048
x = self.group1(x)
# 1000 (num_classes)
if self.training and self.aux_logits:
return x, aux
return x
class InceptionA(nn.Module):
def __init__(self, in_channels, pool_features):
super(InceptionA, self).__init__()
self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)
self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)
self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionB(nn.Module):
def __init__(self, in_channels):
super(InceptionB, self).__init__()
self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)
self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)
def forward(self, x):
branch3x3 = self.branch3x3(x)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionC(nn.Module):
def __init__(self, in_channels, channels_7x7):
super(InceptionC, self).__init__()
self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)
c7 = channels_7x7
self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)
self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)
self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionD(nn.Module):
def __init__(self, in_channels):
super(InceptionD, self).__init__()
self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)
self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)
def forward(self, x):
branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch7x7x3 = self.branch7x7x3_1(x)
branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch7x7x3, branch_pool]
return torch.cat(outputs, 1)
class InceptionE(nn.Module):
def __init__(self, in_channels):
super(InceptionE, self).__init__()
self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)
self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)
self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionAux(nn.Module):
def __init__(self, in_channels, num_classes):
super(InceptionAux, self).__init__()
self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)
self.conv1 = BasicConv2d(128, 768, kernel_size=5)
self.conv1.stddev = 0.01
fc = nn.Linear(768, num_classes)
fc.stddev = 0.001
self.group1 = nn.Sequential(
OrderedDict([
('fc', fc)
])
)
def forward(self, x):
# 17 x 17 x 768
x = F.avg_pool2d(x, kernel_size=5, stride=3)
# 5 x 5 x 768
x = self.conv0(x)
# 5 x 5 x 128
x = self.conv1(x)
# 1 x 1 x 768
x = x.view(x.size(0), -1)
# 768
x = self.group1(x)
# 1000
return x
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.group1 = nn.Sequential(
OrderedDict([
('conv', nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)),
('bn', nn.BatchNorm2d(out_channels, eps=0.001))
])
)
def forward(self, x):
x = self.group1(x)
return F.relu(x, inplace=True)
| 11,908 | 34.549254 | 98 | py |
pytorch-playground | pytorch-playground-master/imagenet/resnet.py | import torch.nn as nn
import math
from utee import misc
from collections import OrderedDict
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
# "3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
m = OrderedDict()
m['conv1'] = conv3x3(inplanes, planes, stride)
m['bn1'] = nn.BatchNorm2d(planes)
m['relu1'] = nn.ReLU(inplace=True)
m['conv2'] = conv3x3(planes, planes)
m['bn2'] = nn.BatchNorm2d(planes)
self.group1 = nn.Sequential(m)
self.relu= nn.Sequential(nn.ReLU(inplace=True))
self.downsample = downsample
def forward(self, x):
if self.downsample is not None:
residual = self.downsample(x)
else:
residual = x
out = self.group1(x) + residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
m = OrderedDict()
m['conv1'] = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
m['bn1'] = nn.BatchNorm2d(planes)
m['relu1'] = nn.ReLU(inplace=True)
m['conv2'] = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
m['bn2'] = nn.BatchNorm2d(planes)
m['relu2'] = nn.ReLU(inplace=True)
m['conv3'] = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
m['bn3'] = nn.BatchNorm2d(planes * 4)
self.group1 = nn.Sequential(m)
self.relu= nn.Sequential(nn.ReLU(inplace=True))
self.downsample = downsample
def forward(self, x):
if self.downsample is not None:
residual = self.downsample(x)
else:
residual = x
out = self.group1(x) + residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
m = OrderedDict()
m['conv1'] = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
m['bn1'] = nn.BatchNorm2d(64)
m['relu1'] = nn.ReLU(inplace=True)
m['maxpool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.group1= nn.Sequential(m)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.Sequential(nn.AvgPool2d(7))
self.group2 = nn.Sequential(
OrderedDict([
('fc', nn.Linear(512 * block.expansion, num_classes))
])
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.group1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.group2(x)
return x
def resnet18(pretrained=False, model_root=None, **kwargs):
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
misc.load_state_dict(model, model_urls['resnet18'], model_root)
return model
def resnet34(pretrained=False, model_root=None, **kwargs):
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
misc.load_state_dict(model, model_urls['resnet34'], model_root)
return model
def resnet50(pretrained=False, model_root=None, **kwargs):
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
misc.load_state_dict(model, model_urls['resnet50'], model_root)
return model
def resnet101(pretrained=False, model_root=None, **kwargs):
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
misc.load_state_dict(model, model_urls['resnet101'], model_root)
return model
def resnet152(pretrained=False, model_root=None, **kwargs):
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
misc.load_state_dict(model, model_urls['resnet152'], model_root)
return model
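# Usage sketch (illustrative, not part of the original file):
if __name__ == '__main__':
    import torch
    net = resnet18()
    y = net(torch.randn(1, 3, 224, 224))
    assert y.shape == (1, 1000)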
| 5,916 | 32.055866 | 109 | py |
pytorch-playground | pytorch-playground-master/imagenet/squeezenet.py | import math
import torch
import torch.nn as nn
from utee import misc
from collections import OrderedDict
__all__ = ['SqueezeNet', 'squeezenet1_0', 'squeezenet1_1']
model_urls = {
'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
}
class Fire(nn.Module):
def __init__(self, inplanes, squeeze_planes,
expand1x1_planes, expand3x3_planes):
super(Fire, self).__init__()
self.inplanes = inplanes
self.group1 = nn.Sequential(
OrderedDict([
('squeeze', nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)),
('squeeze_activation', nn.ReLU(inplace=True))
])
)
self.group2 = nn.Sequential(
OrderedDict([
('expand1x1', nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1)),
('expand1x1_activation', nn.ReLU(inplace=True))
])
)
self.group3 = nn.Sequential(
OrderedDict([
('expand3x3', nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1)),
('expand3x3_activation', nn.ReLU(inplace=True))
])
)
def forward(self, x):
x = self.group1(x)
return torch.cat([self.group2(x),self.group3(x)], 1)
class SqueezeNet(nn.Module):
def __init__(self, version=1.0, num_classes=1000):
super(SqueezeNet, self).__init__()
if version not in [1.0, 1.1]:
raise ValueError("Unsupported SqueezeNet version {version}:"
"1.0 or 1.1 expected".format(version=version))
self.num_classes = num_classes
if version == 1.0:
self.features = nn.Sequential(
nn.Conv2d(3, 96, kernel_size=7, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(96, 16, 64, 64),
Fire(128, 16, 64, 64),
Fire(128, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 32, 128, 128),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(512, 64, 256, 256),
)
else:
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(64, 16, 64, 64),
Fire(128, 16, 64, 64),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(128, 32, 128, 128),
Fire(256, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
Fire(512, 64, 256, 256),
)
        # Final convolution is initialized differently from the rest
final_conv = nn.Conv2d(512, num_classes, kernel_size=1)
self.classifier = nn.Sequential(
nn.Dropout(p=0.5),
final_conv,
nn.ReLU(inplace=True),
nn.AvgPool2d(13)
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
gain = 2.0
if m is final_conv:
m.weight.data.normal_(0, 0.01)
else:
fan_in = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
u = math.sqrt(3.0 * gain / fan_in)
m.weight.data.uniform_(-u, u)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = self.classifier(x)
return x.view(x.size(0), self.num_classes)
def squeezenet1_0(pretrained=False, model_root=None, **kwargs):
r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
accuracy with 50x fewer parameters and <0.5MB model size"
<https://arxiv.org/abs/1602.07360>`_ paper.
"""
model = SqueezeNet(version=1.0, **kwargs)
if pretrained:
misc.load_state_dict(model, model_urls['squeezenet1_0'], model_root)
return model
def squeezenet1_1(pretrained=False, model_root=None, **kwargs):
r"""SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
"""
model = SqueezeNet(version=1.1, **kwargs)
if pretrained:
misc.load_state_dict(model, model_urls['squeezenet1_1'], model_root)
return model
| 5,022 | 35.398551 | 101 | py |
pytorch-playground | pytorch-playground-master/imagenet/vgg.py | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
}
class VGG(nn.Module):
def __init__(self, features, num_classes=1000):
super(VGG, self).__init__()
self.features = features
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
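# cfg legend: each integer is the output-channel count of a 3x3 conv and 'M' is
# a 2x2 max-pool; configurations 'A', 'B', 'D', 'E' correspond to VGG-11/13/16/19.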
def vgg11(pretrained=False, model_root=None, **kwargs):
"""VGG 11-layer model (configuration "A")"""
model = VGG(make_layers(cfg['A']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg11'], model_root))
return model
def vgg11_bn(**kwargs):
"""VGG 11-layer model (configuration "A") with batch normalization"""
kwargs.pop('model_root', None)
return VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
def vgg13(pretrained=False, model_root=None, **kwargs):
"""VGG 13-layer model (configuration "B")"""
model = VGG(make_layers(cfg['B']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg13'], model_root))
return model
def vgg13_bn(**kwargs):
"""VGG 13-layer model (configuration "B") with batch normalization"""
kwargs.pop('model_root', None)
return VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)
def vgg16(pretrained=False, model_root=None, **kwargs):
"""VGG 16-layer model (configuration "D")"""
model = VGG(make_layers(cfg['D']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg16'], model_root))
return model
def vgg16_bn(**kwargs):
"""VGG 16-layer model (configuration "D") with batch normalization"""
kwargs.pop('model_root', None)
return VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
def vgg19(pretrained=False, model_root=None, **kwargs):
"""VGG 19-layer model (configuration "E")"""
model = VGG(make_layers(cfg['E']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg19'], model_root))
return model
def vgg19_bn(**kwargs):
"""VGG 19-layer model (configuration 'E') with batch normalization"""
kwargs.pop('model_root', None)
return VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
| 4,505 | 32.132353 | 113 | py |
pytorch-playground | pytorch-playground-master/imagenet/dataset.py | from utee import misc
import os
import os.path
import numpy as np
import joblib
def get(batch_size, data_root='/tmp/public_dataset/pytorch', train=False, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'imagenet-data'))
print("Building IMAGENET data loader, 50000 for train, 50000 for test")
ds = []
assert train is not True, 'train not supported yet'
if train:
ds.append(IMAGENET(data_root, batch_size, True, **kwargs))
if val:
ds.append(IMAGENET(data_root, batch_size, False, **kwargs))
ds = ds[0] if len(ds) == 1 else ds
return ds
class IMAGENET(object):
def __init__(self, root, batch_size, train=False, input_size=224, **kwargs):
self.mean = np.array([0.485, 0.456, 0.406]).reshape(1, 1, 1, 3)
self.std = np.array([0.229, 0.224, 0.225]).reshape(1, 1, 1, 3)
self.train = train
if train:
pkl_file = os.path.join(root, 'train{}.pkl'.format(input_size))
else:
pkl_file = os.path.join(root, 'val{}.pkl'.format(input_size))
self.data_dict = joblib.load(pkl_file)
self.batch_size = batch_size
self.idx = 0
@property
def n_batch(self):
return int(np.ceil(self.n_sample* 1.0 / self.batch_size))
@property
def n_sample(self):
return len(self.data_dict['data'])
def __len__(self):
return self.n_batch
def __iter__(self):
return self
def __next__(self):
if self.idx >= self.n_batch:
self.idx = 0
raise StopIteration
else:
img = self.data_dict['data'][self.idx*self.batch_size:(self.idx+1)*self.batch_size].astype('float32')
target = self.data_dict['target'][self.idx*self.batch_size:(self.idx+1)*self.batch_size]
self.idx += 1
return img, target
if __name__ == '__main__':
train_ds, val_ds = get(200)
| 1,927 | 29.603175 | 113 | py |
pytorch-playground | pytorch-playground-master/imagenet/alexnet.py | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['AlexNet', 'alexnet']
model_urls = {
'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
}
class AlexNet(nn.Module):
def __init__(self, num_classes=1000):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), 256 * 6 * 6)
x = self.classifier(x)
return x
def alexnet(pretrained=False, model_root=None, **kwargs):
model = AlexNet(**kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['alexnet'], model_root))
return model
| 1,637 | 29.333333 | 84 | py |
pytorch-playground | pytorch-playground-master/mnist/model.py | import torch.nn as nn
from collections import OrderedDict
import torch.utils.model_zoo as model_zoo
from utee import misc
print = misc.logger.info
model_urls = {
'mnist': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/mnist-b07bb66b.pth'
}
class MLP(nn.Module):
def __init__(self, input_dims, n_hiddens, n_class):
super(MLP, self).__init__()
assert isinstance(input_dims, int), 'Please provide int for input_dims'
self.input_dims = input_dims
current_dims = input_dims
layers = OrderedDict()
if isinstance(n_hiddens, int):
n_hiddens = [n_hiddens]
else:
n_hiddens = list(n_hiddens)
for i, n_hidden in enumerate(n_hiddens):
layers['fc{}'.format(i+1)] = nn.Linear(current_dims, n_hidden)
layers['relu{}'.format(i+1)] = nn.ReLU()
layers['drop{}'.format(i+1)] = nn.Dropout(0.2)
current_dims = n_hidden
layers['out'] = nn.Linear(current_dims, n_class)
self.model= nn.Sequential(layers)
print(self.model)
def forward(self, input):
input = input.view(input.size(0), -1)
assert input.size(1) == self.input_dims
return self.model.forward(input)
def mnist(input_dims=784, n_hiddens=[256, 256], n_class=10, pretrained=None):
model = MLP(input_dims, n_hiddens, n_class)
if pretrained is not None:
m = model_zoo.load_url(model_urls['mnist'])
state_dict = m.state_dict() if isinstance(m, nn.Module) else m
assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
model.load_state_dict(state_dict)
return model
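# Usage sketch (illustrative, not part of the original file): the MLP flattens
# its input, so any batch of 28x28 (or 784-dim) tensors works.
if __name__ == '__main__':
    import torch
    net = mnist(input_dims=784, n_hiddens=[256, 256], n_class=10)
    logits = net(torch.randn(4, 1, 28, 28))
    assert logits.shape == (4, 10)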
| 1,660 | 34.340426 | 85 | py |
pytorch-playground | pytorch-playground-master/mnist/dataset.py | from torch.utils.data import DataLoader
import torch
from torchvision import datasets, transforms
import os
def get(batch_size, data_root='/tmp/public_dataset/pytorch', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'mnist-data'))
kwargs.pop('input_size', None)
num_workers = kwargs.setdefault('num_workers', 1)
print("Building MNIST data loader with {} workers".format(num_workers))
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(root=data_root, train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(root=data_root, train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
| 1,398 | 41.393939 | 93 | py |
pytorch-playground | pytorch-playground-master/mnist/train.py | import argparse
import os
import time
from utee import misc
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import dataset
import model
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--wd', type=float, default=0.0001, help='weight decay')
parser.add_argument('--batch_size', type=int, default=200, help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=40, help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, help='learning rate (default: 1e-3)')
parser.add_argument('--gpu', default=None, help='index of gpus to use')
parser.add_argument('--ngpu', type=int, default=1, help='number of gpus to use')
parser.add_argument('--seed', type=int, default=117, help='random seed (default: 1)')
parser.add_argument('--log_interval', type=int, default=100, help='how many batches to wait before logging training status')
parser.add_argument('--test_interval', type=int, default=5, help='how many epochs to wait before another test')
parser.add_argument('--logdir', default='log/default', help='folder to save to the log')
parser.add_argument('--data_root', default='/tmp/public_dataset/pytorch/', help='folder to save the model')
parser.add_argument('--decreasing_lr', default='80,120', help='decreasing strategy')
args = parser.parse_args()
args.logdir = os.path.join(os.path.dirname(__file__), args.logdir)
misc.logger.init(args.logdir, 'train_log')
print = misc.logger.info
# select gpu
args.gpu = misc.auto_select_gpu(utility_bound=0, num_gpu=args.ngpu, selected_gpus=args.gpu)
args.ngpu = len(args.gpu)
# logger
misc.ensure_dir(args.logdir)
print("=================FLAGS==================")
for k, v in args.__dict__.items():
print('{}: {}'.format(k, v))
print("========================================")
# seed
args.cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# data loader
train_loader, test_loader = dataset.get(batch_size=args.batch_size, data_root=args.data_root, num_workers=1)
# model
model = model.mnist(input_dims=784, n_hiddens=[256, 256], n_class=10)
model = torch.nn.DataParallel(model, device_ids= range(args.ngpu))
if args.cuda:
model.cuda()
# optimizer
optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wd, momentum=0.9)
decreasing_lr = list(map(int, args.decreasing_lr.split(',')))
print('decreasing_lr: ' + str(decreasing_lr))
best_acc, old_file = 0, None
t_begin = time.time()
try:
# ready to go
for epoch in range(args.epochs):
model.train()
if epoch in decreasing_lr:
optimizer.param_groups[0]['lr'] *= 0.1
for batch_idx, (data, target) in enumerate(train_loader):
indx_target = target.clone()
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0 and batch_idx > 0:
pred = output.data.max(1)[1] # get the index of the max log-probability
correct = pred.cpu().eq(indx_target).sum()
acc = correct * 1.0 / len(data)
print('Train Epoch: {} [{}/{}] Loss: {:.6f} Acc: {:.4f} lr: {:.2e}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
                    loss.item(), acc, optimizer.param_groups[0]['lr']))
elapse_time = time.time() - t_begin
speed_epoch = elapse_time / (epoch + 1)
speed_batch = speed_epoch / len(train_loader)
eta = speed_epoch * args.epochs - elapse_time
print("Elapsed {:.2f}s, {:.2f} s/epoch, {:.2f} s/batch, ets {:.2f}s".format(
elapse_time, speed_epoch, speed_batch, eta))
misc.model_snapshot(model, os.path.join(args.logdir, 'latest.pth'))
if epoch % args.test_interval == 0:
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
indx_target = target.clone()
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
                test_loss += F.cross_entropy(output, target).item()
pred = output.data.max(1)[1] # get the index of the max log-probability
correct += pred.cpu().eq(indx_target).sum()
test_loss = test_loss / len(test_loader) # average over number of mini-batch
acc = 100. * correct / len(test_loader.dataset)
print('\tTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
test_loss, correct, len(test_loader.dataset), acc))
if acc > best_acc:
new_file = os.path.join(args.logdir, 'best-{}.pth'.format(epoch))
misc.model_snapshot(model, new_file, old_file=old_file, verbose=True)
best_acc = acc
old_file = new_file
except Exception as e:
import traceback
traceback.print_exc()
finally:
print("Total Elapse: {:.2f}, Best Result: {:.3f}%".format(time.time()-t_begin, best_acc))
| 5,502 | 41.992188 | 125 | py |
pytorch-playground | pytorch-playground-master/cifar/model.py | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from IPython import embed
from collections import OrderedDict
from utee import misc
print = misc.logger.info
model_urls = {
'cifar10': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar10-d875770b.pth',
'cifar100': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar100-3a55a987.pth',
}
class CIFAR(nn.Module):
def __init__(self, features, n_channel, num_classes):
super(CIFAR, self).__init__()
assert isinstance(features, nn.Sequential), type(features)
self.features = features
self.classifier = nn.Sequential(
nn.Linear(n_channel, num_classes)
)
print(self.features)
print(self.classifier)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for i, v in enumerate(cfg):
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
padding = v[1] if isinstance(v, tuple) else 1
out_channels = v[0] if isinstance(v, tuple) else v
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=padding)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(out_channels, affine=False), nn.ReLU()]
else:
layers += [conv2d, nn.ReLU()]
in_channels = out_channels
return nn.Sequential(*layers)
def cifar10(n_channel, pretrained=None):
cfg = [n_channel, n_channel, 'M', 2*n_channel, 2*n_channel, 'M', 4*n_channel, 4*n_channel, 'M', (8*n_channel, 0), 'M']
layers = make_layers(cfg, batch_norm=True)
model = CIFAR(layers, n_channel=8*n_channel, num_classes=10)
if pretrained is not None:
m = model_zoo.load_url(model_urls['cifar10'])
state_dict = m.state_dict() if isinstance(m, nn.Module) else m
assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
model.load_state_dict(state_dict)
return model
def cifar100(n_channel, pretrained=None):
cfg = [n_channel, n_channel, 'M', 2*n_channel, 2*n_channel, 'M', 4*n_channel, 4*n_channel, 'M', (8*n_channel, 0), 'M']
layers = make_layers(cfg, batch_norm=True)
model = CIFAR(layers, n_channel=8*n_channel, num_classes=100)
if pretrained is not None:
m = model_zoo.load_url(model_urls['cifar100'])
state_dict = m.state_dict() if isinstance(m, nn.Module) else m
assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
model.load_state_dict(state_dict)
return model
if __name__ == '__main__':
model = cifar10(128, pretrained='log/cifar10/best-135.pth')
embed()
| 2,809 | 36.972973 | 122 | py |
pytorch-playground | pytorch-playground-master/cifar/dataset.py | import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import os
def get10(batch_size, data_root='/tmp/public_dataset/pytorch', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'cifar10-data'))
num_workers = kwargs.setdefault('num_workers', 1)
kwargs.pop('input_size', None)
print("Building CIFAR-10 data loader with {} workers".format(num_workers))
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(
root=data_root, train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])),
batch_size=batch_size, shuffle=True, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(
root=data_root, train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])),
batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
def get100(batch_size, data_root='/tmp/public_dataset/pytorch', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'cifar100-data'))
num_workers = kwargs.setdefault('num_workers', 1)
kwargs.pop('input_size', None)
print("Building CIFAR-100 data loader with {} workers".format(num_workers))
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root=data_root, train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])),
batch_size=batch_size, shuffle=True, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root=data_root, train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])),
batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
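# Usage sketch (illustrative, not part of the original file):
if __name__ == '__main__':
    train_ds, test_ds = get10(200, num_workers=1)
    data, target = next(iter(train_ds))
    print(data.shape, target.shape) # expected: [200, 3, 32, 32] and [200]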
| 2,937 | 40.380282 | 96 | py |
pytorch-playground | pytorch-playground-master/cifar/train.py | import argparse
import os
import time
from utee import misc
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import dataset
import model
from IPython import embed
parser = argparse.ArgumentParser(description='PyTorch CIFAR-X Example')
parser.add_argument('--type', default='cifar10', help='cifar10|cifar100')
parser.add_argument('--channel', type=int, default=128, help='first conv channel (default: 32)')
parser.add_argument('--wd', type=float, default=0.00, help='weight decay')
parser.add_argument('--batch_size', type=int, default=200, help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=150, help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: 1e-3)')
parser.add_argument('--gpu', default=None, help='index of gpus to use')
parser.add_argument('--ngpu', type=int, default=2, help='number of gpus to use')
parser.add_argument('--seed', type=int, default=117, help='random seed (default: 1)')
parser.add_argument('--log_interval', type=int, default=100, help='how many batches to wait before logging training status')
parser.add_argument('--test_interval', type=int, default=5, help='how many epochs to wait before another test')
parser.add_argument('--logdir', default='log/default', help='folder to save to the log')
parser.add_argument('--decreasing_lr', default='80,120', help='decreasing strategy')
args = parser.parse_args()
args.logdir = os.path.join(os.path.dirname(__file__), args.logdir)
misc.logger.init(args.logdir, 'train_log')
print = misc.logger.info
# select gpu
args.gpu = misc.auto_select_gpu(utility_bound=0, num_gpu=args.ngpu, selected_gpus=args.gpu)
args.ngpu = len(args.gpu)
# logger
misc.ensure_dir(args.logdir)
print("=================FLAGS==================")
for k, v in args.__dict__.items():
print('{}: {}'.format(k, v))
print("========================================")
# seed
args.cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# data loader and model
assert args.type in ['cifar10', 'cifar100'], args.type
if args.type == 'cifar10':
train_loader, test_loader = dataset.get10(batch_size=args.batch_size, num_workers=1)
model = model.cifar10(n_channel=args.channel)
else:
train_loader, test_loader = dataset.get100(batch_size=args.batch_size, num_workers=1)
model = model.cifar100(n_channel=args.channel)
model = torch.nn.DataParallel(model, device_ids= range(args.ngpu))
if args.cuda:
model.cuda()
# optimizer
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
decreasing_lr = list(map(int, args.decreasing_lr.split(',')))
print('decreasing_lr: ' + str(decreasing_lr))
best_acc, old_file = 0, None
t_begin = time.time()
try:
# ready to go
for epoch in range(args.epochs):
model.train()
if epoch in decreasing_lr:
optimizer.param_groups[0]['lr'] *= 0.1
for batch_idx, (data, target) in enumerate(train_loader):
indx_target = target.clone()
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0 and batch_idx > 0:
pred = output.data.max(1)[1] # get the index of the max log-probability
correct = pred.cpu().eq(indx_target).sum()
acc = correct * 1.0 / len(data)
print('Train Epoch: {} [{}/{}] Loss: {:.6f} Acc: {:.4f} lr: {:.2e}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
                    loss.item(), acc, optimizer.param_groups[0]['lr']))  # .item() replaces the deprecated loss.data[0]
elapse_time = time.time() - t_begin
speed_epoch = elapse_time / (epoch + 1)
speed_batch = speed_epoch / len(train_loader)
eta = speed_epoch * args.epochs - elapse_time
print("Elapsed {:.2f}s, {:.2f} s/epoch, {:.2f} s/batch, ets {:.2f}s".format(
elapse_time, speed_epoch, speed_batch, eta))
misc.model_snapshot(model, os.path.join(args.logdir, 'latest.pth'))
if epoch % args.test_interval == 0:
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
indx_target = target.clone()
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
                test_loss += F.cross_entropy(output, target).item()
pred = output.data.max(1)[1] # get the index of the max log-probability
correct += pred.cpu().eq(indx_target).sum()
test_loss = test_loss / len(test_loader) # average over number of mini-batch
acc = 100. * correct / len(test_loader.dataset)
print('\tTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
test_loss, correct, len(test_loader.dataset), acc))
if acc > best_acc:
new_file = os.path.join(args.logdir, 'best-{}.pth'.format(epoch))
misc.model_snapshot(model, new_file, old_file=old_file, verbose=True)
best_acc = acc
old_file = new_file
except Exception as e:
import traceback
traceback.print_exc()
finally:
print("Total Elapse: {:.2f}, Best Result: {:.3f}%".format(time.time()-t_begin, best_acc))
| 5,777 | 42.119403 | 125 | py |
pytorch-playground | pytorch-playground-master/utee/quant.py | from torch.autograd import Variable
import torch
from torch import nn
from collections import OrderedDict
import math
from IPython import embed
def compute_integral_part(input, overflow_rate):
abs_value = input.abs().view(-1)
sorted_value = abs_value.sort(dim=0, descending=True)[0]
split_idx = int(overflow_rate * len(sorted_value))
v = sorted_value[split_idx]
if isinstance(v, Variable):
v = float(v.data.cpu())
sf = math.ceil(math.log2(v+1e-12))
return sf
def linear_quantize(input, sf, bits):
assert bits >= 1, bits
if bits == 1:
return torch.sign(input) - 1
delta = math.pow(2.0, -sf)
bound = math.pow(2.0, bits-1)
min_val = - bound
max_val = bound - 1
rounded = torch.floor(input / delta + 0.5)
clipped_value = torch.clamp(rounded, min_val, max_val) * delta
return clipped_value
def log_minmax_quantize(input, bits):
assert bits >= 1, bits
if bits == 1:
        return torch.sign(input)  # keep the return type consistent with the multi-bit path
s = torch.sign(input)
input0 = torch.log(torch.abs(input) + 1e-20)
v = min_max_quantize(input0, bits-1)
v = torch.exp(v) * s
return v
def log_linear_quantize(input, sf, bits):
assert bits >= 1, bits
if bits == 1:
        return torch.sign(input)  # keep the return type consistent with the multi-bit path
s = torch.sign(input)
input0 = torch.log(torch.abs(input) + 1e-20)
v = linear_quantize(input0, sf, bits-1)
v = torch.exp(v) * s
return v
def min_max_quantize(input, bits):
assert bits >= 1, bits
if bits == 1:
return torch.sign(input) - 1
min_val, max_val = input.min(), input.max()
if isinstance(min_val, Variable):
max_val = float(max_val.data.cpu().numpy()[0])
min_val = float(min_val.data.cpu().numpy()[0])
input_rescale = (input - min_val) / (max_val - min_val)
n = math.pow(2.0, bits) - 1
v = torch.floor(input_rescale * n + 0.5) / n
v = v * (max_val - min_val) + min_val
return v
def tanh_quantize(input, bits):
assert bits >= 1, bits
if bits == 1:
return torch.sign(input)
input = torch.tanh(input) # [-1, 1]
input_rescale = (input + 1.0) / 2 #[0, 1]
n = math.pow(2.0, bits) - 1
v = torch.floor(input_rescale * n + 0.5) / n
v = 2 * v - 1 # [-1, 1]
v = 0.5 * torch.log((1 + v) / (1 - v)) # arctanh
return v
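# Hedged usage sketch (added for illustration, not part of the original
# module): drives the quantizers above on a random tensor. bits=8 and
# overflow_rate=0.0 are placeholder choices; the code assumes the
# Variable-era PyTorch semantics the rest of this file targets.
def _demo_quantizers(bits=8):
    x = torch.randn(16)
    # choose a scale factor so that (almost) no values overflow
    sf = bits - 1 - compute_integral_part(x, overflow_rate=0.0)
    return {
        'linear': linear_quantize(x, sf, bits),
        'minmax': min_max_quantize(x, bits),
        'tanh': tanh_quantize(x, bits),
    }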
class LinearQuant(nn.Module):
def __init__(self, name, bits, sf=None, overflow_rate=0.0, counter=10):
super(LinearQuant, self).__init__()
self.name = name
self._counter = counter
self.bits = bits
self.sf = sf
self.overflow_rate = overflow_rate
@property
def counter(self):
return self._counter
def forward(self, input):
if self._counter > 0:
self._counter -= 1
sf_new = self.bits - 1 - compute_integral_part(input, self.overflow_rate)
self.sf = min(self.sf, sf_new) if self.sf is not None else sf_new
return input
else:
output = linear_quantize(input, self.sf, self.bits)
return output
def __repr__(self):
return '{}(sf={}, bits={}, overflow_rate={:.3f}, counter={})'.format(
self.__class__.__name__, self.sf, self.bits, self.overflow_rate, self.counter)
class LogQuant(nn.Module):
def __init__(self, name, bits, sf=None, overflow_rate=0.0, counter=10):
super(LogQuant, self).__init__()
self.name = name
self._counter = counter
self.bits = bits
self.sf = sf
self.overflow_rate = overflow_rate
@property
def counter(self):
return self._counter
def forward(self, input):
if self._counter > 0:
self._counter -= 1
log_abs_input = torch.log(torch.abs(input))
sf_new = self.bits - 1 - compute_integral_part(log_abs_input, self.overflow_rate)
self.sf = min(self.sf, sf_new) if self.sf is not None else sf_new
return input
else:
output = log_linear_quantize(input, self.sf, self.bits)
return output
def __repr__(self):
return '{}(sf={}, bits={}, overflow_rate={:.3f}, counter={})'.format(
self.__class__.__name__, self.sf, self.bits, self.overflow_rate, self.counter)
class NormalQuant(nn.Module):
def __init__(self, name, bits, quant_func):
super(NormalQuant, self).__init__()
self.name = name
self.bits = bits
self.quant_func = quant_func
@property
def counter(self):
return self._counter
def forward(self, input):
output = self.quant_func(input, self.bits)
return output
def __repr__(self):
return '{}(bits={})'.format(self.__class__.__name__, self.bits)
def duplicate_model_with_quant(model, bits, overflow_rate=0.0, counter=10, type='linear'):
"""assume that original model has at least a nn.Sequential"""
assert type in ['linear', 'minmax', 'log', 'tanh']
if isinstance(model, nn.Sequential):
l = OrderedDict()
for k, v in model._modules.items():
if isinstance(v, (nn.Conv2d, nn.Linear, nn.BatchNorm1d, nn.BatchNorm2d, nn.AvgPool2d)):
l[k] = v
if type == 'linear':
quant_layer = LinearQuant('{}_quant'.format(k), bits=bits, overflow_rate=overflow_rate, counter=counter)
elif type == 'log':
# quant_layer = LogQuant('{}_quant'.format(k), bits=bits, overflow_rate=overflow_rate, counter=counter)
quant_layer = NormalQuant('{}_quant'.format(k), bits=bits, quant_func=log_minmax_quantize)
elif type == 'minmax':
quant_layer = NormalQuant('{}_quant'.format(k), bits=bits, quant_func=min_max_quantize)
else:
quant_layer = NormalQuant('{}_quant'.format(k), bits=bits, quant_func=tanh_quantize)
l['{}_{}_quant'.format(k, type)] = quant_layer
else:
l[k] = duplicate_model_with_quant(v, bits, overflow_rate, counter, type)
m = nn.Sequential(l)
return m
else:
for k, v in model._modules.items():
model._modules[k] = duplicate_model_with_quant(v, bits, overflow_rate, counter, type)
return model
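# Hedged example (illustrative only): wrap a toy nn.Sequential so that a
# quantization layer is inserted after each supported module. The layer
# sizes are arbitrary placeholders.
def _demo_duplicate_with_quant():
    net = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(8, 8)),
        ('relu', nn.ReLU()),
    ]))
    return duplicate_model_with_quant(net, bits=8, overflow_rate=0.0,
                                      counter=10, type='linear')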
| 6,302 | 32.705882 | 124 | py |
pytorch-playground | pytorch-playground-master/utee/misc.py | import cv2
import os
import shutil
import pickle as pkl
import time
import numpy as np
import hashlib
from IPython import embed
class Logger(object):
def __init__(self):
self._logger = None
def init(self, logdir, name='log'):
if self._logger is None:
import logging
if not os.path.exists(logdir):
os.makedirs(logdir)
log_file = os.path.join(logdir, name)
if os.path.exists(log_file):
os.remove(log_file)
self._logger = logging.getLogger()
self._logger.setLevel('INFO')
fh = logging.FileHandler(log_file)
ch = logging.StreamHandler()
self._logger.addHandler(fh)
self._logger.addHandler(ch)
def info(self, str_info):
self.init('/tmp', 'tmp.log')
self._logger.info(str_info)
logger = Logger()
print = logger.info
def ensure_dir(path, erase=False):
if os.path.exists(path) and erase:
print("Removing old folder {}".format(path))
shutil.rmtree(path)
if not os.path.exists(path):
print("Creating folder {}".format(path))
os.makedirs(path)
def load_pickle(path):
begin_st = time.time()
with open(path, 'rb') as f:
print("Loading pickle object from {}".format(path))
v = pkl.load(f)
print("=> Done ({:.4f} s)".format(time.time() - begin_st))
return v
def dump_pickle(obj, path):
with open(path, 'wb') as f:
print("Dumping pickle object to {}".format(path))
pkl.dump(obj, f, protocol=pkl.HIGHEST_PROTOCOL)
def auto_select_gpu(mem_bound=500, utility_bound=0, gpus=(0, 1, 2, 3, 4, 5, 6, 7), num_gpu=1, selected_gpus=None):
import sys
import os
import subprocess
import re
import time
import numpy as np
    if 'CUDA_VISIBLE_DEVICES' in os.environ:
sys.exit(0)
if selected_gpus is None:
mem_trace = []
utility_trace = []
for i in range(5): # sample 5 times
info = subprocess.check_output('nvidia-smi', shell=True).decode('utf-8')
            mem = [int(s[:-5]) for s in re.compile(r'\d+MiB\s/').findall(info)]
            utility = [int(re.compile(r'\d+').findall(s)[0]) for s in re.compile(r'\d+%\s+Default').findall(info)]
mem_trace.append(mem)
utility_trace.append(utility)
time.sleep(0.1)
mem = np.mean(mem_trace, axis=0)
utility = np.mean(utility_trace, axis=0)
assert(len(mem) == len(utility))
nGPU = len(utility)
ideal_gpus = [i for i in range(nGPU) if mem[i] <= mem_bound and utility[i] <= utility_bound and i in gpus]
if len(ideal_gpus) < num_gpu:
print("No sufficient resource, available: {}, require {} gpu".format(ideal_gpus, num_gpu))
sys.exit(0)
else:
selected_gpus = list(map(str, ideal_gpus[:num_gpu]))
else:
selected_gpus = selected_gpus.split(',')
print("Setting GPU: {}".format(selected_gpus))
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(selected_gpus)
return selected_gpus
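# Hedged usage sketch: auto-pick one mostly idle GPU. The bounds are
# illustrative; note that auto_select_gpu may call sys.exit(0) when no GPU
# satisfies them (or when CUDA_VISIBLE_DEVICES is already set).
def _demo_auto_select_gpu():
    return auto_select_gpu(mem_bound=500, utility_bound=0, num_gpu=1)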
def expand_user(path):
return os.path.abspath(os.path.expanduser(path))
def model_snapshot(model, new_file, old_file=None, verbose=False):
from collections import OrderedDict
import torch
if isinstance(model, torch.nn.DataParallel):
model = model.module
if old_file and os.path.exists(expand_user(old_file)):
if verbose:
print("Removing old model {}".format(expand_user(old_file)))
os.remove(expand_user(old_file))
if verbose:
print("Saving model to {}".format(expand_user(new_file)))
state_dict = OrderedDict()
for k, v in model.state_dict().items():
if v.is_cuda:
v = v.cpu()
state_dict[k] = v
torch.save(state_dict, expand_user(new_file))
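# Hedged sketch: rotate checkpoints with model_snapshot. The /tmp paths are
# placeholders; the previous snapshot is removed before the new one is saved.
def _demo_model_snapshot(model):
    model_snapshot(model, new_file='/tmp/best-1.pth',
                   old_file='/tmp/best-0.pth', verbose=True)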
def load_lmdb(lmdb_file, n_records=None):
import lmdb
import numpy as np
lmdb_file = expand_user(lmdb_file)
if os.path.exists(lmdb_file):
data = []
env = lmdb.open(lmdb_file, readonly=True, max_readers=512)
with env.begin() as txn:
cursor = txn.cursor()
begin_st = time.time()
print("Loading lmdb file {} into memory".format(lmdb_file))
for key, value in cursor:
_, target, _ = key.decode('ascii').split(':')
target = int(target)
                img = cv2.imdecode(np.frombuffer(value, np.uint8), cv2.IMREAD_COLOR)
data.append((img, target))
if n_records is not None and len(data) >= n_records:
break
env.close()
print("=> Done ({:.4f} s)".format(time.time() - begin_st))
return data
else:
print("Not found lmdb file".format(lmdb_file))
def str2img(str_b):
    return cv2.imdecode(np.frombuffer(str_b, np.uint8), cv2.IMREAD_COLOR)
def img2str(img):
    return cv2.imencode('.jpg', img)[1].tobytes()
def md5(s):
m = hashlib.md5()
m.update(s)
return m.hexdigest()
def eval_model(model, ds, n_sample=None, ngpu=1, is_imagenet=False):
import tqdm
import torch
from torch import nn
from torch.autograd import Variable
class ModelWrapper(nn.Module):
def __init__(self, model):
super(ModelWrapper, self).__init__()
self.model = model
self.mean = [0.485, 0.456, 0.406]
self.std = [0.229, 0.224, 0.225]
def forward(self, input):
input.data.div_(255.)
input.data[:, 0, :, :].sub_(self.mean[0]).div_(self.std[0])
input.data[:, 1, :, :].sub_(self.mean[1]).div_(self.std[1])
input.data[:, 2, :, :].sub_(self.mean[2]).div_(self.std[2])
return self.model(input)
correct1, correct5 = 0, 0
n_passed = 0
if is_imagenet:
model = ModelWrapper(model)
model = model.eval()
model = torch.nn.DataParallel(model, device_ids=range(ngpu)).cuda()
n_sample = len(ds) if n_sample is None else n_sample
for idx, (data, target) in enumerate(tqdm.tqdm(ds, total=n_sample)):
n_passed += len(data)
data = Variable(torch.FloatTensor(data)).cuda()
indx_target = torch.LongTensor(target)
output = model(data)
bs = output.size(0)
idx_pred = output.data.sort(1, descending=True)[1]
idx_gt1 = indx_target.expand(1, bs).transpose_(0, 1)
idx_gt5 = idx_gt1.expand(bs, 5)
correct1 += idx_pred[:, :1].cpu().eq(idx_gt1).sum()
correct5 += idx_pred[:, :5].cpu().eq(idx_gt5).sum()
if idx >= n_sample - 1:
break
acc1 = correct1 * 1.0 / n_passed
acc5 = correct5 * 1.0 / n_passed
return acc1, acc5
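# Hedged sketch: top-1/top-5 accuracy over the first 100 batches of a
# loader `ds` yielding (images, integer targets); assumes a CUDA device
# is available, since eval_model moves the model and data to GPU.
def _demo_eval_model(model, ds):
    acc1, acc5 = eval_model(model, ds, n_sample=100, ngpu=1)
    return acc1, acc5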
def load_state_dict(model, model_urls, model_root):
from torch.utils import model_zoo
from torch import nn
import re
from collections import OrderedDict
own_state_old = model.state_dict()
own_state = OrderedDict() # remove all 'group' string
for k, v in own_state_old.items():
        k = re.sub(r'group\d+\.', '', k)
own_state[k] = v
state_dict = model_zoo.load_url(model_urls, model_root)
for name, param in state_dict.items():
if name not in own_state:
print(own_state.keys())
raise KeyError('unexpected key "{}" in state_dict'
.format(name))
if isinstance(param, nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
own_state[name].copy_(param)
missing = set(own_state.keys()) - set(state_dict.keys())
no_use = set(state_dict.keys()) - set(own_state.keys())
if len(no_use) > 0:
raise KeyError('some keys are not used: "{}"'.format(no_use))
| 7,772 | 32.943231 | 114 | py |
pytorch-playground | pytorch-playground-master/utee/selector.py | from utee import misc
import os
from imagenet import dataset
print = misc.logger.info
from IPython import embed
known_models = [
'mnist', 'svhn', # 28x28
'cifar10', 'cifar100', # 32x32
'stl10', # 96x96
'alexnet', # 224x224
'vgg16', 'vgg16_bn', 'vgg19', 'vgg19_bn', # 224x224
    'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',  # 224x224
    'squeezenet_v0', 'squeezenet_v1',  # 224x224
'inception_v3', # 299x299
]
def mnist(cuda=True, model_root=None):
print("Building and initializing mnist parameters")
from mnist import model, dataset
m = model.mnist(pretrained=os.path.join(model_root, 'mnist.pth'))
if cuda:
m = m.cuda()
return m, dataset.get, False
def svhn(cuda=True, model_root=None):
print("Building and initializing svhn parameters")
from svhn import model, dataset
m = model.svhn(32, pretrained=os.path.join(model_root, 'svhn.pth'))
if cuda:
m = m.cuda()
return m, dataset.get, False
def cifar10(cuda=True, model_root=None):
print("Building and initializing cifar10 parameters")
from cifar import model, dataset
m = model.cifar10(128, pretrained=os.path.join(model_root, 'cifar10.pth'))
if cuda:
m = m.cuda()
return m, dataset.get10, False
def cifar100(cuda=True, model_root=None):
print("Building and initializing cifar100 parameters")
from cifar import model, dataset
m = model.cifar100(128, pretrained=os.path.join(model_root, 'cifar100.pth'))
if cuda:
m = m.cuda()
return m, dataset.get100, False
def stl10(cuda=True, model_root=None):
print("Building and initializing stl10 parameters")
from stl10 import model, dataset
m = model.stl10(32, pretrained=os.path.join(model_root, 'stl10.pth'))
if cuda:
m = m.cuda()
return m, dataset.get, False
def alexnet(cuda=True, model_root=None):
print("Building and initializing alexnet parameters")
from imagenet import alexnet as alx
m = alx.alexnet(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def vgg16(cuda=True, model_root=None):
print("Building and initializing vgg16 parameters")
from imagenet import vgg
m = vgg.vgg16(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def vgg16_bn(cuda=True, model_root=None):
print("Building vgg16_bn parameters")
from imagenet import vgg
m = vgg.vgg16_bn(model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def vgg19(cuda=True, model_root=None):
print("Building and initializing vgg19 parameters")
from imagenet import vgg
m = vgg.vgg19(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def vgg19_bn(cuda=True, model_root=None):
print("Building vgg19_bn parameters")
from imagenet import vgg
m = vgg.vgg19_bn(model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def inception_v3(cuda=True, model_root=None):
print("Building and initializing inception_v3 parameters")
from imagenet import inception
m = inception.inception_v3(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def resnet18(cuda=True, model_root=None):
print("Building and initializing resnet-18 parameters")
from imagenet import resnet
m = resnet.resnet18(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def resnet34(cuda=True, model_root=None):
print("Building and initializing resnet-34 parameters")
from imagenet import resnet
m = resnet.resnet34(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def resnet50(cuda=True, model_root=None):
print("Building and initializing resnet-50 parameters")
from imagenet import resnet
m = resnet.resnet50(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def resnet101(cuda=True, model_root=None):
print("Building and initializing resnet-101 parameters")
from imagenet import resnet
m = resnet.resnet101(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def resnet152(cuda=True, model_root=None):
print("Building and initializing resnet-152 parameters")
from imagenet import resnet
m = resnet.resnet152(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def squeezenet_v0(cuda=True, model_root=None):
print("Building and initializing squeezenet_v0 parameters")
from imagenet import squeezenet
m = squeezenet.squeezenet1_0(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def squeezenet_v1(cuda=True, model_root=None):
print("Building and initializing squeezenet_v1 parameters")
from imagenet import squeezenet
m = squeezenet.squeezenet1_1(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def select(model_name, **kwargs):
assert model_name in known_models, model_name
kwargs.setdefault('model_root', os.path.expanduser('~/.torch/models'))
return eval('{}'.format(model_name))(**kwargs)
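# Hedged usage sketch: fetch a pretrained model, its dataset fetcher, and
# an is_imagenet flag. Weights are expected under ~/.torch/models by default.
def _demo_select():
    model_raw, ds_fetcher, is_imagenet = select('cifar10', cuda=False)
    return model_raw, ds_fetcher, is_imagenet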
if __name__ == '__main__':
m1 = alexnet()
embed()
| 5,245 | 29.5 | 80 | py |
pytorch-playground | pytorch-playground-master/script/convert.py | import os
import numpy as np
import tqdm
from utee import misc
import argparse
import cv2
import joblib
parser = argparse.ArgumentParser(description='Extract the ILSVRC2012 val dataset')
parser.add_argument('--in_file', default='val224_compressed.pkl', help='input file path')
parser.add_argument('--out_root', default='/data/public_dataset/pytorch/imagenet-data/', help='output file path')
args = parser.parse_args()
d = misc.load_pickle(args.in_file)
assert len(d['data']) == 50000, len(d['data'])
assert len(d['target']) == 50000, len(d['target'])
data299 = []
for img, target in tqdm.tqdm(zip(d['data'], d['target']), total=50000):
img224 = misc.str2img(img)
img299 = cv2.resize(img224, (299, 299))
data299.append(img299)
data_dict299 = dict(
data = np.array(data299).transpose(0, 3, 1, 2),
target = d['target']
)
if not os.path.exists(args.out_root):
os.makedirs(args.out_root)
joblib.dump(data_dict299, os.path.join(args.out_root, 'val299.pkl'))
data299.clear()
data_dict299.clear()
data224 = []
for img, target in tqdm.tqdm(zip(d['data'], d['target']), total=50000):
img224 = misc.str2img(img)
data224.append(img224)
data_dict224 = dict(
data = np.array(data224).transpose(0, 3, 1, 2),
target = d['target']
)
joblib.dump(data_dict224, os.path.join(args.out_root, 'val224.pkl'))
| 1,337 | 25.76 | 113 | py |
coling2018-xling_argument_mining | coling2018-xling_argument_mining-master/code/annotationProjection/readDocs.py | import sys
def readDoc(fn,index0=1):
hh=[]
hd = {}
h=[]
for line in open(fn):
line = line.strip()
if line=="":
if h!=[]:
hh.append(h)
str=" ".join([x[0] for x in h])
hd[str] = h
h=[]
else:
x = line.split("\t")
word,label = x[index0],x[-1]
h.append((word,label))
if h!=[]:
hh.append(h)
str=" ".join([x[0] for x in h])
hd[str] = h
return hh,hd
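# Hedged example: readDoc parses a CoNLL-style file (token in column
# `index0`, label in the last column, blank lines separating sentences)
# into a list of [(word, label), ...] sentences plus a dict keyed by the
# joined token string. 'sample.dat' is a placeholder path.
def _demo_readDoc(fn='sample.dat'):
    sentences, by_text = readDoc(fn)
    return len(sentences), list(by_text)[:1]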
| 434 | 17.125 | 39 | py |
coling2018-xling_argument_mining | coling2018-xling_argument_mining-master/code/annotationProjection/projectArguments.py | import sys
from readDocs import readDoc as rd
# project argument spans from source to target document
# Steffen Eger
# 03/2018
# SAMPLE USAGE:
# python2 projectArguments.py train_full.dat test_full.dat dev_full.dat essays.aligned essays.aligned.bidirectional
# Inputs:
# $x_full.dat: train, test, dev annotated data in source language
# essays.aligned: aligned sentences in source and target language (source sentences must all be in train/dev/test.dat)
# essays.aligned.bidirectional: word alignments (e.g., produced by fast_align)
# Outputs:
# my${x}_gen1.dat: train, test, dev projected annotation spans in the target language
K=1
def isConsecutive(lst,descending=False):
last = None
for x in lst:
if last is not None:
next = last-1 if descending else last+1
if x!=next: return False
last = x
return True
def findExtremeConsecutive(lst,reverse=True,k=1):
s = sorted(lst,reverse=reverse)
for ix,x in enumerate(s):
mylst = s[ix:ix+k]
if isConsecutive(mylst,descending=reverse): return x
return s[0]
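# Hedged sketch: with reverse=True the list is scanned in descending order
# and the first value that opens a run of k consecutive integers is
# returned -- 7 here, since (7, 6) is consecutive.
def _demo_findExtremeConsecutive():
    return findExtremeConsecutive([1, 2, 5, 6, 7], reverse=True, k=2)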
def detect_bios(labels):
indices = []
startComponent=False
startindex = None
type = None
for index,tok in enumerate(labels):
word,token = tok
if startComponent==True and token.startswith("B-"):
endindex = index-1
indices.append((startindex,endindex,type))
startindex = index
type = token.split(":")[0][2:]
startComponent = True
elif startComponent==True and token.startswith("O"):
endindex = index-1
indices.append((startindex,endindex,type))
startComponent = False
elif token.startswith("B-"):
type = token.split(":")[0][2:]
startComponent = True
startindex = index
if token.startswith("I-"):
endindex = index
indices.append((startindex,endindex,type))
return indices
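# Hedged sketch: detect_bios maps (word, BIO-label) pairs to
# (start, end, type) spans. Note the implementation appends a span for
# every I- token and again when the span closes, so tuples may repeat.
def _demo_detect_bios():
    toks = [('I', 'O'), ('think', 'O'), ('it', 'B-Claim:1'),
            ('helps', 'I-Claim:1'), ('.', 'O')]
    return detect_bios(toks)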
def getTranslationIndices(indices,align):
h = {}
for y in align.split():
a,b = list(map(int,y.split("-")))
if a in h:
h[a] = h[a]+[b]
else:
h[a] = [b]
#print(h,align,indices)
#sys.exit(1)
aligns=[]
for x in indices:
start,end,type = x
q = []
for z in range(start,end+1):
#print("-->",z,h)
#print(h[z])
q.append( h.get(z,None) )
qq = list(filter(lambda x: x!=None,q))
flat_list = [item for sublist in qq for item in sublist]
#print("##->",flat_list,x)
#print(flat_list); sys.exit(1)
# YOU MAY WANT TO CHANGE THIS
indexStart,indexEnd = min(flat_list),max(flat_list)
for myK in range(K,0,-1):
indexStart,indexEnd = findExtremeConsecutive(flat_list,reverse=False,k=K),findExtremeConsecutive(flat_list,reverse=True,k=myK)
if len(aligns)>0:
indexEndPrev = aligns[-1][1]
indexStartPrev = aligns[-1][0]
if indexStart<=indexEndPrev:
sys.stderr.write("DOESN'T WORK OUT %d %d\n"%(indexStart,indexEndPrev))
if indexEnd<indexStartPrev:
sys.stderr.write("Li'l non-monotonity\n")
break
indexStart = indexEndPrev+1
if indexStart<=indexEnd: break
if indexStart>indexEnd:
sys.stderr.write(str(aligns))
sys.stderr.write("ERROR SOMEWHERE: %d %d\n"%(indexStart,indexEnd));
#sys.exit(1)
print(indices)
aligns.append((indexStart,indexEnd,type))
#print(aligns)
return aligns
def printout(sequence,fout,type="O"):
for itoken,token in enumerate(sequence):
if type!="O":
if itoken==0:
pre="B-"
else:
pre="I-"
else:
pre=""
fout.write(token+"\t"+pre+type+"\n")
def process(sentences,sentences_alignments,labels,fout,verbose=False):
n = len(sentences)
last = 0
for i in range(len(sentences)):
en,de = sentences[i]
en_tokens = en.split()
de_tokens = de.split()
m = len(en_tokens)
align = sentences_alignments[i].strip()
curLabels = labels[last:last+m]
indices = detect_bios(curLabels)
last = last+m
#print(en_tokens,"\t",curLabels,"\t",de_tokens,"\t",indices)
#print(align)
aligns = sorted( getTranslationIndices(indices,align) )
if verbose:
print("ALIGNS",aligns,de)
#if aligns!=[]:
prev = 0
for start,end,type in aligns:
if start>end: continue
before = de_tokens[prev:start]
middle = de_tokens[start:end+1]
if before!=[]: printout(before,fout)
printout(middle,fout,type)
prev = end+1
after = de_tokens[prev:]
if after!=[]:
printout(after,fout)
#sys.exit(1)
train,train_hash = rd(sys.argv[1])
test,test_hash = rd(sys.argv[2])
dev,dev_hash = rd(sys.argv[3])
#print(train_hash)
alignedText = sys.argv[4]
alignments = sys.argv[5]
fp_lines=open(alignments).readlines()
acc=[]
sentences=[]
sentences_alignments=[]
i=0
ftrain=open("mytrain_gen%d.dat"%K,"w")
ftest=open("mytest_gen%d.dat"%K,"w")
fdev=open("mydev_gen%d.dat"%K,"w")
for line in open(alignedText):
line = line.strip()
en,de = line.split(" ||| ")
sentences.append((en,de))
sentences_alignments.append(fp_lines[i])
acc+=en.split()
acc_text = " ".join(acc)
#print(acc_text+"<--")
for hash in [train_hash,test_hash,dev_hash]:
if acc_text in hash:
if hash==train_hash: fout = ftrain
elif hash==test_hash: fout = ftest
elif hash==dev_hash: fout = fdev
else: fout=None
labels = hash[acc_text]
process(sentences,sentences_alignments,labels,fout)
fout.write("\n")
acc = []
sentences=[]
sentences_alignments=[]
i+=1
| 5,486 | 27.878947 | 132 | py |
checklist | checklist-master/setup.py | from setuptools import setup, find_packages
from setuptools.command.develop import develop
from setuptools.command.install import install
from setuptools.command.bdist_egg import bdist_egg
from setuptools.command.egg_info import egg_info
from setuptools.command.build_py import build_py
from subprocess import check_call
import sys
import os
def enable_visual_interface():
check_call(f'"{sys.executable}"'+" -m pip install jupyter", shell=True)
import notebook
notebook.nbextensions.install_nbextension_python(
"checklist.viewer", user=True, overwrite=True)
notebook.nbextensions.enable_nbextension_python(
"checklist.viewer")
def enable_visual_interface_shell_cmd(direction):
sys.path.append(direction)
enable_visual_interface()
#"""
class PostDevelopCommand(develop):
"""Pre-installation for development mode."""
def run(self):
develop.run(self)
#enable_visual_interface()
self.execute(enable_visual_interface_shell_cmd, (self.install_lib,), msg="Running post install task")
class BdistEggCommand(bdist_egg):
def run(self):
bdist_egg.run(self)
enable_visual_interface()
#self.execute(enable_visual_interface_shell_cmd, (self.install_lib,), msg=f"Running post install task on {sys.executable}")
class BuildPyCommand(build_py):
def run(self):
build_py.run(self)
enable_visual_interface()
#self.execute(enable_visual_interface_shell_cmd, (self.install_lib,), msg="Running post install task")
class PostInstallCommand(install):
def run(self):
#super().do_egg_install()
install.run(self)
self.execute(enable_visual_interface_shell_cmd, (self.install_lib,), msg="Running post install task")
#enable_visual_interface()
class EggInfoCommand(egg_info):
def run(self):
egg_info.run(self)
enable_visual_interface()
#self.execute(enable_visual_interface_shell_cmd, (self.install_lib,), msg="Running post install task")
setup(name='checklist',
version='0.0.11',
description='Beyond Accuracy: Behavioral Testing of NLP Models with CheckList',
url='http://github.com/marcotcr/checklist',
author='Marco Tulio Ribeiro',
author_email='marcotcr@gmail.com',
license='MIT',
packages= find_packages(exclude=['js', 'node_modules', 'tests']),
install_requires=[
'numpy>=1.18',
'spacy>=2.2',
'munch>=2.5',
'dill>=0.3.1',
'jupyter>=1.0',
'ipywidgets>=7.5',
'transformers>=2.8',
'patternfork-nosql',
'iso-639'
],
cmdclass={
'develop': PostDevelopCommand,
'install': PostInstallCommand,
'bdist_egg': BdistEggCommand,
'egg_info': EggInfoCommand,
'build_py': BuildPyCommand,
},
package_data={'viewer':['static/*'], "data": ["*"], 'checklist': ['data/*', 'data/lexicons/*', 'viewer/static/*']},
#include_package_data=True,
zip_safe=False
)
| 3,011 | 33.62069 | 131 | py |
checklist | checklist-master/checklist/perturb.py | import numpy as np
import collections
import re
import os
import json
import pattern
from pattern.en import tenses
from .editor import recursive_apply, MunchWithAdd
def load_data():
cur_folder = os.path.dirname(__file__)
basic = json.load(open(os.path.join(cur_folder, 'data', 'lexicons', 'basic.json')))
names = json.load(open(os.path.join(cur_folder, 'data', 'names.json')))
name_set = { x:set(names[x]) for x in names }
data = {
'name': names,
'name_set': name_set,
'city': basic['city'],
'country': basic['country'],
}
return data
def process_ret(ret, ret_m=None, meta=False, n=10):
if ret:
if len(ret) > n:
idxs = np.random.choice(len(ret), n, replace=False)
ret = [ret[i] for i in idxs]
if ret_m:
ret_m = [ret_m[i] for i in idxs]
if meta:
ret = (ret, ret_m)
return ret
return None
class Perturb:
data = load_data()
@staticmethod
def perturb(data, perturb_fn, keep_original=True, nsamples=None, *args, **kwargs):
"""Perturbs data according to some function
Parameters
----------
data : list
List of examples, could be strings, tuples, dicts, spacy docs, whatever
perturb_fn : function
Arguments: (example, *args, **kwargs)
Returns: list of examples, or (examples, meta) if meta=True in **kwargs.
Can also return None if perturbation does not apply, and it will be ignored.
keep_original : bool
if True, include original example (from data) in output
nsamples : int
number of examples in data to perturb
meta : bool
if True, perturb_fn returns (examples, meta), and meta is added to ret.meta
Returns
-------
MunchWithAdd
will have .data and .meta (if meta=True in **kwargs)
"""
ret = MunchWithAdd()
use_meta = kwargs.get('meta', False)
ret_data = []
meta = []
order = list(range(len(data)))
samples = 0
if nsamples:
np.random.shuffle(order)
for i in order:
d = data[i]
t = []
add = []
if keep_original:
org = recursive_apply(d, str)
t.append(org)
add.append(None)
p = perturb_fn(d, *args, **kwargs)
a = []
x = []
if not p or all([not x for x in p]):
continue
if use_meta:
p, a = p
            if type(p) in [np.ndarray, list]:
t.extend(p)
add.extend(a)
else:
t.append(p)
add.append(a)
ret_data.append(t)
meta.append(add)
samples += 1
if nsamples and samples == nsamples:
break
ret.data = ret_data
if use_meta:
ret.meta = meta
return ret
@staticmethod
def strip_punctuation(doc):
"""Removes punctuation
Parameters
----------
doc : spacy.tokens.Doc
spacy doc
Returns
-------
string
With punctuation stripped
"""
# doc is a spacy doc
while len(doc) and doc[-1].pos_ == 'PUNCT':
doc = doc[:-1]
return doc.text
@staticmethod
def punctuation(doc):
"""Perturbation function which adds / removes punctuations
Parameters
----------
doc : spacy.tokens.Doc
spacy doc
Returns
-------
list(string)
With punctuation removed and / or final stop added.
"""
# doc is a spacy doc
s = Perturb.strip_punctuation(doc)
ret = []
if s != doc.text:
ret.append(s)
if s + '.' != doc.text:
ret.append(s + '.')
return ret
@staticmethod
def add_typos(string, typos=1):
"""Perturbation functions, swaps random characters with their neighbors
Parameters
----------
string : str
input string
typos : int
number of typos to add
Returns
-------
list(string)
perturbed strings
"""
string = list(string)
swaps = np.random.choice(len(string) - 1, typos)
for swap in swaps:
tmp = string[swap]
string[swap] = string[swap + 1]
string[swap + 1] = tmp
return ''.join(string)
@staticmethod
def remove_negation(doc):
"""Removes negation from doc.
This is experimental, may or may not work.
Parameters
----------
doc : spacy.token.Doc
input
Returns
-------
string
With all negations removed
"""
# This removes all negations in the doc. I should maybe add an option to remove just some.
notzs = [i for i, z in enumerate(doc) if z.lemma_ == 'not' or z.dep_ == 'neg']
new = []
for notz in notzs:
before = doc[notz - 1] if notz != 0 else None
after = doc[notz + 1] if len(doc) > notz + 1 else None
if (after and after.pos_ == 'PUNCT') or (before and before.text in ['or']):
continue
new.append(notz)
notzs = new
if not notzs:
return None
ret = ''
start = 0
for i, notz in enumerate(notzs):
id_start = notz
to_add = ' '
id_end = notz + 1
before = doc[notz - 1] if notz != 0 else None
after = doc[notz + 1] if len(doc) > notz + 1 else None
if before and before.lemma_ in ['will', 'can', 'do']:
id_start = notz - 1
                tense_counts = collections.Counter([x[0] for x in pattern.en.tenses(before.text)]).most_common(1)
                tense = tense_counts[0][0] if len(tense_counts) else 'present'
p = pattern.en.tenses(before.text)
params = [tense, 3]
if p:
tmp = [x for x in p if x[0] == tense]
if tmp:
params = list(tmp[0])
else:
params = list(p[0])
to_add = ' '+ pattern.en.conjugate(before.lemma_, *params) + ' '
if before and after and before.lemma_ == 'do' and after.pos_ == 'VERB':
id_start = notz - 1
                tense_counts = collections.Counter([x[0] for x in pattern.en.tenses(before.text)]).most_common(1)
                tense = tense_counts[0][0] if len(tense_counts) else 'present'
p = pattern.en.tenses(before.text)
params = [tense, 3]
if p:
tmp = [x for x in p if x[0] == tense]
if tmp:
params = list(tmp[0])
else:
params = list(p[0])
to_add = ' '+ pattern.en.conjugate(after.text, *params) + ' '
id_end = notz + 2
ret += doc[start:id_start].text + to_add
start = id_end
ret += doc[id_end:].text
return ret
@staticmethod
def add_negation(doc):
"""Adds negation to doc
This is experimental, may or may not work. It also only works for specific parses.
Parameters
----------
doc : spacy.token.Doc
input
Returns
-------
string
With negations added
"""
for sentence in doc.sents:
if len(sentence) < 3:
continue
root_id = [x.i for x in sentence if x.dep_ == 'ROOT'][0]
root = doc[root_id]
if '?' in sentence.text and sentence[0].text.lower() == 'how':
continue
if root.lemma_.lower() in ['thank', 'use']:
continue
if root.pos_ not in ['VERB', 'AUX']:
continue
neg = [True for x in sentence if x.dep_ == 'neg' and x.head.i == root_id]
if neg:
continue
if root.lemma_ == 'be':
if '?' in sentence.text:
continue
if root.text.lower() in ['is', 'was', 'were', 'am', 'are', '\'s', '\'re', '\'m']:
return doc[:root_id + 1].text + ' not ' + doc[root_id + 1:].text
else:
return doc[:root_id].text + ' not ' + doc[root_id:].text
else:
aux = [x for x in sentence if x.dep_ in ['aux', 'auxpass'] and x.head.i == root_id]
if aux:
aux = aux[0]
if aux.lemma_.lower() in ['can', 'do', 'could', 'would', 'will', 'have', 'should']:
lemma = doc[aux.i].lemma_.lower()
if lemma == 'will':
fixed = 'won\'t'
elif lemma == 'have' and doc[aux.i].text in ['\'ve', '\'d']:
fixed = 'haven\'t' if doc[aux.i].text == '\'ve' else 'hadn\'t'
elif lemma == 'would' and doc[aux.i].text in ['\'d']:
fixed = 'wouldn\'t'
else:
fixed = doc[aux.i].text.rstrip('n') + 'n\'t' if lemma != 'will' else 'won\'t'
fixed = ' %s ' % fixed
return doc[:aux.i].text + fixed + doc[aux.i + 1:].text
return doc[:root_id].text + ' not ' + doc[root_id:].text
else:
                # TODO: does, do, etc. Remove the return None above.
subj = [x for x in sentence if x.dep_ in ['csubj', 'nsubj']]
p = pattern.en.tenses(root.text)
                tense_counts = collections.Counter([x[0] for x in pattern.en.tenses(root.text)]).most_common(1)
                tense = tense_counts[0][0] if len(tense_counts) else 'present'
params = [tense, 3]
if p:
tmp = [x for x in p if x[0] == tense]
if tmp:
params = list(tmp[0])
else:
params = list(p[0])
if root.tag_ not in ['VBG']:
do = pattern.en.conjugate('do', *params) + 'n\'t'
new_root = pattern.en.conjugate(root.text, tense='infinitive')
else:
do = 'not'
new_root = root.text
return '%s %s %s %s' % (doc[:root_id].text, do, new_root, doc[root_id + 1:].text)
@staticmethod
def contractions(sentence, **kwargs):
"""Perturbation functions, contracts and expands contractions if present
Parameters
----------
sentence : str
input
Returns
-------
list
List of strings with contractions expanded or contracted, or []
"""
expanded = [Perturb.expand_contractions(sentence), Perturb.contract(sentence)]
return [t for t in expanded if t != sentence]
@staticmethod
def expand_contractions(sentence, **kwargs):
"""Expands contractions in a sentence (if any)
Parameters
----------
sentence : str
input string
Returns
-------
string
String with contractions expanded (if any)
"""
contraction_map = {
"ain't": "is not", "aren't": "are not", "can't": "cannot",
"can't've": "cannot have", "could've": "could have", "couldn't":
"could not", "didn't": "did not", "doesn't": "does not", "don't":
"do not", "hadn't": "had not", "hasn't": "has not", "haven't":
"have not", "he'd": "he would", "he'd've": "he would have",
"he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y":
"how do you", "how'll": "how will", "how's": "how is",
"I'd": "I would", "I'll": "I will", "I'm": "I am",
"I've": "I have", "i'd": "i would", "i'll": "i will",
"i'm": "i am", "i've": "i have", "isn't": "is not",
"it'd": "it would", "it'll": "it will", "it's": "it is", "ma'am":
"madam", "might've": "might have", "mightn't": "might not",
"must've": "must have", "mustn't": "must not", "needn't":
"need not", "oughtn't": "ought not", "shan't": "shall not",
"she'd": "she would", "she'll": "she will", "she's": "she is",
"should've": "should have", "shouldn't": "should not", "that'd":
"that would", "that's": "that is", "there'd": "there would",
"there's": "there is", "they'd": "they would",
"they'll": "they will", "they're": "they are",
"they've": "they have", "wasn't": "was not", "we'd": "we would",
"we'll": "we will", "we're": "we are", "we've": "we have",
"weren't": "were not", "what're": "what are", "what's": "what is",
"when's": "when is", "where'd": "where did", "where's": "where is",
"where've": "where have", "who'll": "who will", "who's": "who is",
"who've": "who have", "why's": "why is", "won't": "will not",
"would've": "would have", "wouldn't": "would not",
"you'd": "you would", "you'd've": "you would have",
"you'll": "you will", "you're": "you are", "you've": "you have"
}
# self.reverse_contraction_map = dict([(y, x) for x, y in self.contraction_map.items()])
contraction_pattern = re.compile(r'\b({})\b'.format('|'.join(contraction_map.keys())),
flags=re.IGNORECASE|re.DOTALL)
def expand_match(contraction):
match = contraction.group(0)
first_char = match[0]
expanded_contraction = contraction_map.get(match, contraction_map.get(match.lower()))
expanded_contraction = first_char + expanded_contraction[1:]
return expanded_contraction
return contraction_pattern.sub(expand_match, sentence)
@staticmethod
def contract(sentence, **kwargs):
"""Contract expanded contractions in a sentence (if any)
Parameters
----------
sentence : str
input string
Returns
-------
string
String with contractions contracted (if any)
"""
reverse_contraction_map = {
'is not': "isn't", 'are not': "aren't", 'cannot': "can't",
'could not': "couldn't", 'did not': "didn't", 'does not':
"doesn't", 'do not': "don't", 'had not': "hadn't", 'has not':
"hasn't", 'have not': "haven't", 'he is': "he's", 'how did':
"how'd", 'how is': "how's", 'I would': "I'd", 'I will': "I'll",
'I am': "I'm", 'i would': "i'd", 'i will': "i'll", 'i am': "i'm",
'it would': "it'd", 'it will': "it'll", 'it is': "it's",
'might not': "mightn't", 'must not': "mustn't", 'need not': "needn't",
'ought not': "oughtn't", 'shall not': "shan't", 'she would': "she'd",
'she will': "she'll", 'she is': "she's", 'should not': "shouldn't",
'that would': "that'd", 'that is': "that's", 'there would':
"there'd", 'there is': "there's", 'they would': "they'd",
'they will': "they'll", 'they are': "they're", 'was not': "wasn't",
'we would': "we'd", 'we will': "we'll", 'we are': "we're", 'were not':
"weren't", 'what are': "what're", 'what is': "what's", 'when is':
"when's", 'where did': "where'd", 'where is': "where's",
'who will': "who'll", 'who is': "who's", 'who have': "who've", 'why is':
"why's", 'will not': "won't", 'would not': "wouldn't", 'you would':
"you'd", 'you will': "you'll", 'you are': "you're",
}
reverse_contraction_pattern = re.compile(r'\b({})\b '.format('|'.join(reverse_contraction_map.keys())),
flags=re.IGNORECASE|re.DOTALL)
def cont(possible):
match = possible.group(1)
first_char = match[0]
expanded_contraction = reverse_contraction_map.get(match, reverse_contraction_map.get(match.lower()))
expanded_contraction = first_char + expanded_contraction[1:] + ' '
return expanded_contraction
return reverse_contraction_pattern.sub(cont, sentence)
@staticmethod
def change_names(doc, meta=False, n=10, first_only=False, last_only=False, seed=None):
"""Replace names with other names
Parameters
----------
doc : spacy.token.Doc
input
meta : bool
if True, will return list of (orig_name, new_name) as meta
n : int
number of names to replace original names with
first_only : bool
if True, will only replace first names
last_only : bool
if True, will only replace last names
seed : int
random seed
Returns
-------
list(str)
if meta=True, returns (list(str), list(tuple))
Strings with names replaced.
"""
if seed is not None:
np.random.seed(seed)
ents = [x.text for x in doc.ents if np.all([a.ent_type_ == 'PERSON' for a in x])]
ret = []
ret_m = []
for x in ents:
f = x.split()[0]
sex = None
if f.capitalize() in Perturb.data['name_set']['women']:
sex = 'women'
if f.capitalize() in Perturb.data['name_set']['men']:
sex = 'men'
if not sex:
continue
if len(x.split()) > 1:
l = x.split()[1]
if len(l) > 2 and l.capitalize() not in Perturb.data['name_set']['last']:
continue
else:
if last_only:
return None
names = Perturb.data['name'][sex][:90+n]
to_use = np.random.choice(names, n)
if not first_only:
f = x
if len(x.split()) > 1:
last = Perturb.data['name']['last'][:90+n]
last = np.random.choice(last, n)
to_use = ['%s %s' % (x, y) for x, y in zip(names, last)]
if last_only:
to_use = last
f = x.split()[1]
for y in to_use:
ret.append(re.sub(r'\b%s\b' % re.escape(f), y, doc.text))
ret_m.append((f, y))
return process_ret(ret, ret_m=ret_m, n=n, meta=meta)
@staticmethod
def change_location(doc, meta=False, seed=None, n=10):
"""Change city and country names
Parameters
----------
doc : spacy.token.Doc
input
meta : bool
if True, will return list of (orig_loc, new_loc) as meta
seed : int
random seed
n : int
number of locations to replace original locations with
Returns
-------
list(str)
if meta=True, returns (list(str), list(tuple))
Strings with locations replaced.
"""
if seed is not None:
np.random.seed(seed)
ents = [x.text for x in doc.ents if np.all([a.ent_type_ == 'GPE' for a in x])]
ret = []
ret_m = []
for x in ents:
if x in Perturb.data['city']:
names = Perturb.data['city'][:100]
elif x in Perturb.data['country']:
names = Perturb.data['country'][:50]
else:
continue
sub_re = re.compile(r'\b%s\b' % re.escape(x))
to_use = np.random.choice(names, n)
ret.extend([sub_re.sub(n, doc.text) for n in to_use])
ret_m.extend([(x, n) for n in to_use])
return process_ret(ret, ret_m=ret_m, n=n, meta=meta)
@staticmethod
def change_number(doc, meta=False, seed=None, n=10):
"""Change integers to other integers within 20% of the original integer
Does not change '2' or '4' to avoid abbreviations (this is 4 you, etc)
Parameters
----------
doc : spacy.token.Doc
input
meta : bool
if True, will return list of (orig_number, new_number) as meta
seed : int
random seed
n : int
            number of numbers to replace the original numbers with
Returns
-------
list(str)
if meta=True, returns (list(str), list(tuple))
Strings with numbers replaced.
"""
if seed is not None:
np.random.seed(seed)
nums = [x.text for x in doc if x.text.isdigit()]
ret = []
ret_m = []
for x in nums:
# e.g. this is 4 you
if x == '2' or x == '4':
continue
sub_re = re.compile(r'\b%s\b' % x)
try:
change = int(int(x) * .2) + 1
except:
continue
to_sub = np.random.randint(-min(change, int(x) - 1), change + 1, n * 3)
to_sub = ['%s' % str(int(x) + t) for t in to_sub if str(int(x) + t) != x][:n]
ret.extend([sub_re.sub(n, doc.text) for n in to_sub])
ret_m.extend([(x, n) for n in to_sub])
return process_ret(ret, ret_m=ret_m, n=n, meta=meta)
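# Hedged usage sketch (module-level, illustrative only): the perturbation
# helpers are static, so they can be called without instantiating Perturb;
# the string-based ones need no spacy doc. The sentences are placeholders.
def _demo_perturb():
    typoed = Perturb.add_typos('This is a test sentence', typos=2)
    contracted = Perturb.contract('It is not what it is.')
    return typoed, contracted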
| 21,734 | 36.217466 | 113 | py |
checklist | checklist-master/checklist/editor.py | import collections
import itertools
import string
import numpy as np
import re
import copy
import os
import json
import munch
import pickle
import csv
from .viewer.template_editor import TemplateEditor
from .multilingual import multilingual_params, get_language_code
class MunchWithAdd(munch.Munch):
def __add__(self, other):
temp = copy.deepcopy(self)
for k in self:
try:
temp[k] = temp[k] + other[k]
except KeyError:
raise Exception('Both Munches must have the same keys')
return temp
def __iadd__(self, other):
for k in self:
self[k] = self[k] + other[k]
return self
def __hash__(self):
return hash(self.toJSON())
class SafeFormatter(string.Formatter):
def vformat(self, format_string, args, kwargs):
args_len = len(args) # for checking IndexError
tokens = []
for (lit, name, spec, conv) in self.parse(format_string):
# re-escape braces that parse() unescaped
lit = lit.replace('{', '{{').replace('}', '}}')
# only lit is non-None at the end of the string
if name is None:
tokens.append(lit)
else:
# but conv and spec are None if unused
conv = '!' + conv if conv else ''
spec = ':' + spec if spec else ''
# name includes indexing ([blah]) and attributes (.blah)
# so get just the first part
fp = name.split('[')[0].split('.')[0]
# treat as normal if fp is empty (an implicit
# positional arg), a digit (an explicit positional
# arg) or if it is in kwargs
if not fp or fp.isdigit() or fp in kwargs:
tokens.extend([lit, '{', name, conv, spec, '}'])
# otherwise escape the braces
else:
tokens.extend([lit, '{{', name, conv, spec, '}}'])
format_string = ''.join(tokens) # put the string back together
# finally call the default formatter
return string.Formatter.vformat(self, format_string, args, kwargs)
def recursive_format(obj, mapping, ignore_missing=False):
"""Formats all strings within an object, using mapping
Parameters
----------
obj : string, tuple, list, or dict
Object (leaves must be strings, regardless of type)
mapping : dict
format dictionary, maps keys to values
ignore_missing : bool
If True, will not throw exception if a string contains a tag not
present in mapping, and will keep the tag instead.
Returns
-------
string, tuple, list, or dict
Object of the same type as obj, with strings formatted (tags replaced
by their value)
"""
def formatfn(x, mapping):
fmt = SafeFormatter()
formatz = lambda x, m: x.format(**m) if not ignore_missing else fmt.format(x, **m)
options = re.compile(r'{([^}]+):([^}]+)}')
def mysub(match):
options, thing = match.group(1, 2)
ret = ''
if 'a' in options:
if ignore_missing and thing not in mapping:
return match.group()
else:
word = formatz('{%s}' % thing, mapping)
ret += '%s ' % add_article(word).split()[0]
ret += '{%s}' % thing
return ret
x = options.sub(mysub, x)
return formatz(x, mapping)
return recursive_apply(obj, formatfn, mapping)
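# Hedged sketch: recursive_format fills {tags} in arbitrarily nested
# containers, and the {a:tag} option prepends the appropriate article.
def _demo_recursive_format():
    # returns ['I saw an owl.', ('owl',)]
    return recursive_format(['I saw {a:animal}.', ('{animal}',)],
                            {'animal': 'owl'})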
def recursive_apply(obj, fn, *args, **kwargs):
"""Recursively applies a function to an obj
Parameters
----------
obj : string, tuple, list, or dict
Object (leaves must be strings, regardless of type)
fn : function
function to be applied to the leaves (strings)
Returns
-------
string, tuple, list, or dict
Object of the same type as obj, with fn applied to leaves
"""
if type(obj) in [str, bytes]:
return fn(obj, *args, **kwargs)#obj.format(**(mapping))
elif type(obj) == tuple:
return tuple(recursive_apply(list(obj), fn, *args, **kwargs))
elif type(obj) == list:
return [recursive_apply(o, fn, *args, **kwargs) for o in obj]
elif type(obj) == dict:
return {k: recursive_apply(v, fn, *args, **kwargs) for k, v in obj.items()}
else:
return fn(obj, *args, **kwargs)
# return obj
def replace_mask(text):
"""Replaces multiple instances of mask with indexed versions.
Parameters
----------
text : string
masked input, e.g. "This is a {mask} {mask} and {mask}"
Returns
-------
string
multiple instances of the same mask are replaced with indexed versions
e.g. "This is a {mask[0]} {mask[1]} and {mask[2]}
"""
mask_finder = re.compile(r'\{((?:[^\}]*:)?mask\d*)\}')
i = 0
while mask_finder.search(text):
text = mask_finder.sub(r'{\1[%d]}' % i, text, 1)
i += 1
return text
def add_article(noun):
return 'an %s' % noun if noun[0].lower() in ['a', 'e', 'i', 'o', 'u'] else 'a %s' % noun
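# Hedged sketch: replace_mask indexes repeated {mask} tags so each can be
# filled independently; add_article picks 'a' vs 'an' by first letter.
def _demo_mask_helpers():
    indexed = replace_mask('This is {a:mask} {mask}.')
    # indexed == 'This is {a:mask[0]} {mask[1]}.'
    return indexed, add_article('apple')  # second element: 'an apple'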
def find_all_keys(obj):
"""Finds all tag keys in object
Parameters
----------
obj : string, tuple, list, or dict
Object (leaves must be strings, regardless of type)
Returns
-------
set
Set of all keys (with options)
"""
strings = get_all_strings(obj)
ret = set()
for s in strings:
f = string.Formatter()
for x in f.parse(s):
r = x[1] if not x[2] else '%s:%s' % (x[1], x[2])
ret.add(r)
return set([x for x in ret if x])
def get_mask_index(obj):
"""Find all masked strings in obj and index them by mask id
Parameters
----------
obj : string, tuple, list, or dict
Object (leaves must be strings, regardless of type)
Returns
-------
tuple(dict, dict)
First dict is a map from mask id to list of strings
Second dict is a map from mask id to options
"""
strings = get_all_strings(obj)
# ?: after parenthesis makes group non-capturing
mask_finder = re.compile(r'\{(?:[^\}]*:)?mask\d*\}')
mask_rep = re.compile(r'[\{\}]')
find_options = re.compile(r'.*:')
ret = collections.defaultdict(lambda: [])
options = collections.defaultdict(lambda: '')
for s in strings:
masks = mask_finder.findall(s)
nooptions = [mask_rep.sub('', find_options.sub('', x)) for x in masks]
ops = [find_options.search(mask_rep.sub('', x)) for x in masks]
ops = [x.group().strip(':') for x in ops if x]
if len(set(nooptions)) > 1:
raise Exception('Can only have one mask index per template string')
if nooptions:
ret[nooptions[0]].append(s)
options[nooptions[0]] += ''.join(ops)
return ret, options
def get_all_strings(obj):
"""Returns all strings in obj
Parameters
----------
obj : string, tuple, list, or dict
Object (leaves must be strings, regardless of type)
Returns
-------
set
All strings in obj leaves.
"""
ret = set()
if type(obj) in [str, bytes]:
ret.add(obj)
elif type(obj) in [tuple, list, dict]:
if type(obj) == dict:
obj = obj.values()
k = [get_all_strings(x) for x in obj]
k = [x for x in k if x]
for x in k:
ret = ret.union(x)
return set([x for x in ret if x])
def get_all_strings_ordered(obj):
ret = list()
if type(obj) in [str, bytes]:
ret.append(obj)
elif type(obj) in [tuple, list, dict]:
if type(obj) == dict:
obj = obj.values()
k = [get_all_strings(x) for x in obj]
for x in k:
ret += x
return [x for x in ret if x]
def wrapped_random_choice(x, *args, **kwargs):
try:
return np.random.choice(x, *args, **kwargs)
except:
idxs = np.random.choice(len(x), *args, **kwargs)
return type(x)([x[i] for i in idxs])
class Editor(object):
def __init__(self, language='english', model_name=None):
self.lexicons = {}
self.data = {}
self.tg_params = {
'language': language,
}
if model_name is not None:
self.tg_params['model_name'] = model_name
self._load_lexicons(language)
self.selected_suggestions = []
def _load_lexicons(self, language):
cur_folder = os.path.dirname(__file__)
folder = os.path.abspath(os.path.join(cur_folder, "data", 'lexicons'))
for f in os.listdir(folder):
self.lexicons.update(json.load(open(os.path.join(folder, f))))
self.data['names'] = json.load(open(os.path.join(cur_folder, 'data', 'names.json')))
self.data['names'] = {x:set(self.data['names'][x]) for x in self.data['names']}
make_munch = lambda x: munch.Munch(x) if type(x) == dict else x
for x in self.lexicons:
self.lexicons[x] = [make_munch(x) for x in self.lexicons[x]]
language = get_language_code(language)
wikidata = pickle.load(open(os.path.join(cur_folder, 'data', 'wikidata.pkl'), 'rb'))
get_ln = lambda d: d.get(language, d.get('en'))
self.lexicons['male'] = get_ln(wikidata.mnames)
self.lexicons['female'] = get_ln(wikidata.fnames)
self.lexicons['first_name'] = [y for x in zip(self.lexicons['male'], self.lexicons['female']) for y in x]
self.lexicons['last_name'] = get_ln(wikidata.lnames)
self.lexicons['country'] = [get_ln(x.label) for x in wikidata.countries]
# united states by default
self.lexicons['city'] = [get_ln(x.label) for x in wikidata.countries[2].cities]
# Most populous country that has language as official language
for country in wikidata.countries:
if country.primary_lang == language:
self.lexicons['city'] = [get_ln(x.label) for x in country.cities]
break
self.lexicons['country_city'] = munch.Munch()
for country in wikidata.countries:
l = country.label.en.replace(' ', '_')
self.lexicons['country_city'][l] = [get_ln(x.label) for x in country.cities]
self.lexicons['male_from'] = wikidata.male_by_country
self.lexicons['female_from'] = wikidata.female_by_country
self.lexicons['last_from'] = wikidata.last_by_country
self.lexicons = munch.Munch(self.lexicons)
def __getattr__(self, attr):
if attr == 'tg':
from .text_generation import TextGenerator
params = multilingual_params(**self.tg_params)
self.tg = TextGenerator(**params)
return self.tg
else:
raise AttributeError
def suggest_replace(self, text, word, full_sentences=False, words_and_sentences=False, **kwargs):
"""Masked language model suggestion for replacing word in sentence
Parameters
----------
text : str
context
word : str
word to be replaced
full_sentences : bool
If True, returns full sentences with replaced suggestions
words_and_sentences : bool
If True, returns tuples of (replacement word, full_sentence)
Returns
-------
list
Default: list of strings, suggestions for replacements
If full_sentences or words_and_sentences: see documentation above.
"""
ret = self.tg.replace_word(text, word, **kwargs)
if kwargs.get('verbose', False):
print('\n'.join(['%6s %s' % ('%.2f' % x[2], x[1]) for x in ret[:5]]))
if words_and_sentences:
return [(tuple(x[0]), x[1]) if len(x[0]) > 1 else (x[0][0], x[1]) for x in ret]
if full_sentences:
return [x[1] for x in ret]
else:
return [tuple(x[0]) if len(x[0]) > 1 else x[0][0] for x in ret]
def _wordnet_stuff(self, templates, word, type, threshold=5, depth=3, pos=None, **kwargs):
texts = self.template(templates, unroll=True, **kwargs).data
idxs = np.random.choice(len(texts), min(10, len(texts)), replace=False)
texts = [texts[i] for i in idxs]
if type != 'related' and any([word not in x for x in texts]):
raise Exception('word %s must be in all templates' % word)
fn = {'antonyms': self.tg.antonyms,
'synonyms': self.tg.synonyms,
'related': self.tg.related_words,
'hypernyms': self.tg.more_general,
'hyponyms': self.tg.more_specific,
}[type]
return [x[0][0] for x in fn(texts, word, threshold=threshold, pos=pos, depth=depth)]
def antonyms(self, templates, word, threshold=5, **kwargs):
"""Find antonyms of word that fit in templates
Parameters
----------
templates : str, list, tuple, or dict
On leaves: templates with {tags}, which will be substituted for mapping in **kwargs
word : str
Word for which we want antonyms
threshold : float
Maximum allowed log likelihood difference between word and antonym in context
Returns
-------
list
List of antonyms that fit the given templates
"""
return self._wordnet_stuff(templates, word, 'antonyms', threshold=threshold, **kwargs)
def synonyms(self, templates, word, threshold=5, **kwargs):
"""Find synonyms of word that fit in templates
Parameters
----------
templates : str, list, tuple, or dict
On leaves: templates with {tags}, which will be substituted for mapping in **kwargs
word : str
Word for which we want synonyms
threshold : float
            Maximum allowed log likelihood difference between word and synonym in context
Returns
-------
list
List of synonyms that fit the given templates
"""
return self._wordnet_stuff(templates, word, 'synonyms', threshold=threshold, **kwargs)
def related_words(self, templates, word, threshold=5, **kwargs):
"""Find words that are related to word that fit in templates
By related words, we mean hyponyms of the word's hypernyms
Parameters
----------
templates : str, list, tuple, or dict
On leaves: templates with {tags}, which will be substituted for mapping in **kwargs
word : str
Word for which we want related words
threshold : float
            Maximum allowed log likelihood difference between word and related word in context
Returns
-------
list
List of related words that fit the given templates
"""
return self._wordnet_stuff(templates, word, 'related', threshold=threshold, **kwargs)
def hypernyms(self, templates, word, threshold=5, **kwargs):
"""Find hypernyms of word that fit in templates
Parameters
----------
templates : str, list, tuple, or dict
On leaves: templates with {tags}, which will be substituted for mapping in **kwargs
word : str
Word for which we want hypernyms
threshold : float
            Maximum allowed log likelihood difference between word and hypernym in context
Returns
-------
list
List of hypernyms that fit the given templates
"""
return self._wordnet_stuff(templates, word, 'hypernyms', threshold=threshold, **kwargs)
def hyponyms(self, templates, word, threshold=5, **kwargs):
"""Find hyponyms of word that fit in templates
Parameters
----------
templates : str, list, tuple, or dict
On leaves: templates with {tags}, which will be substituted for mapping in **kwargs
word : str
Word for which we want hyponyms
threshold : float
            Maximum allowed log likelihood difference between word and hyponym in context
Returns
-------
list
List of hyponyms that fit the given templates
"""
return self._wordnet_stuff(templates, word, 'hyponyms', threshold=threshold, **kwargs)
def suggest(self, templates, return_score=False, **kwargs):
"""Suggests fill-ins based on a masked language model
Parameters
----------
templates : str, list, tuple, or dict
On leaves: templates with {tags}, which will be substituted for mapping in **kwargs
Must have at least one {mask}. Cannot have {mask} and {mask1}, but can have multiple {mask}s
return_score : bool
If True, returns tuples of (word, score)
**kwargs : type
See documentation for function 'template'
Returns
-------
list(str or tuple)
list of fill-in suggestions, sorted by likelihood
(with likelihood if return_score=True)
"""
mask_index, ops = get_mask_index(templates)
if not mask_index:
return []
if len(mask_index) != 1:
raise Exception('Only one mask index is allowed')
ret = self.template(templates, **kwargs, mask_only=True)
xs = [tuple(x[0]) if len(x[0]) > 1 else x[0][0] for x in ret]
if return_score:
scores = [x[2] for x in ret]
xs = list(zip(xs, scores))
if kwargs.get('verbose', False):
print('\n'.join(['%6s %s' % ('%.2f' % x[2], x[1]) for x in ret[:5]]))
return xs
def _set_selected_suggestions(self, mask_suggests):
self.selected_suggestions = mask_suggests
return self.selected_suggestions
def visual_suggest(self, templates, **kwargs):
"""Spawns a jupyter visualization for masked language model suggestions
Parameters
----------
templates : str, list, tuple, or dict
On leaves: templates with {tags}, which will be substituted for mapping in **kwargs
Must have at least one {mask}. Cannot have {mask} and {mask1}, but can have multiple {mask}s
**kwargs : type
See documentation for function 'template'
Returns
-------
TemplateEditor
visualization. Selected suggestions will be in self.selected_suggestions
"""
tagged_keys = find_all_keys(templates)
template_strs = get_all_strings_ordered(templates)
items = self._get_fillin_items(tagged_keys, max_count=5, **kwargs)
kwargs["verbose"] = False
mask_suggests = self.suggest(templates, **kwargs)
if not mask_suggests:
raise Exception('No valid suggestions for the given template!')
self.selected_suggestions = []
return TemplateEditor(
template_strs=template_strs,
tagged_keys=tagged_keys,
tag_dict=items,
mask_suggests=mask_suggests[:50],
format_fn=recursive_format,
select_suggests_fn=self._set_selected_suggestions,
tokenizer=self.tg.tokenizer
)
def add_lexicon(self, name, values, overwrite=False, append=False, remove_duplicates=False):
"""Add tag to lexicon
Parameters
----------
name : str
Tag name.
values : list(str)
Tag values.
overwrite : bool
If True, replaces tag with the same name if it already exists
append : bool
If True, adds values to current lexicon with name
remove_duplicates: bool
If append=True and remove_duplicates=True, remove duplicate values
from lexicon after appending
"""
# words can be strings, dictionaries, and other objects
if overwrite and append:
raise Exception('Either overwrite or append must be False')
if append:
if name not in self.lexicons:
self.lexicons[name] = values
else:
self.lexicons[name].extend(values)
if remove_duplicates:
self.lexicons[name] = list(set(self.lexicons[name]))
return
if name in self.lexicons and not overwrite:
raise Exception('%s already in lexicons. Call with overwrite=True to overwrite' % name)
self.lexicons[name] = values
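# Usage sketch:
#     editor.add_lexicon('fruit', ['apple', 'banana'])
#     editor.add_lexicon('fruit', ['pear', 'apple'], append=True, remove_duplicates=True)
#     editor.add_lexicon('fruit', ['mango'], overwrite=True)  # replaces the previous list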
def add_lexicon_from_csv(self, name, path, overwrite=False, append=False, remove_duplicates=False):
"""Add tag to lexicon from csv file
Parameters
----------
name : str
Tag name.
path : str
Path to csv file
overwrite : bool
If True, replaces tag with the same name if it already exists
append : bool
If True, adds values to current lexicon with name
remove_duplicates: bool
If append=True and remove_duplicates=True, remove duplicate values
from lexicon after appending
"""
values = []
col_names = []
with open(path, newline='') as f:
reader = csv.reader(f)
for (i, row) in enumerate(reader):
if i == 0:
for col_name in row:
col_names.append(col_name)
else:
d = {}
if len(row) != len(col_names):
raise Exception(f'Length of row {i} does not match header length ({len(row)} != {len(col_names)})')
for (j, val) in enumerate(row):
d[col_names[j]] = val
values.append(MunchWithAdd(d))
self.add_lexicon(name, values, overwrite=overwrite, append=append, remove_duplicates=remove_duplicates)
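# Usage sketch: for a hypothetical 'people.csv' containing
#     name,city
#     Ana,Lisbon
# the call editor.add_lexicon_from_csv('people', 'people.csv') creates one
# MunchWithAdd per data row, addressable in templates as {people.name} / {people.city}.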
def _get_fillin_items(self, all_keys, max_count=None, **kwargs):
items = {}
mask_match = re.compile(r'mask\d*')
for k in kwargs:
if re.search(r'\d+$', k):
raise(Exception('Error: keys cannot end in integers, we use that to index multiple copies of the same key (offending key: "%s")' % k))
for k in all_keys:
# TODO: process if ends in number
# TODO: process if is a:key to add article
k = re.sub(r'\..*', '', k)
k = re.sub(r'\[.*\]', '', k)
k = re.sub(r'.*?:', '', k)
newk = re.sub(r'\d+$', '', k)
if mask_match.match(k):
continue
if newk in kwargs:
items[k] = kwargs[newk]
elif newk in self.lexicons:
items[k] = self.lexicons[newk]
else:
raise(Exception('Error: key "%s" not in items or lexicons' % newk))
if max_count:
items[k] = items[k][:max_count]
return items
def template(self, templates, nsamples=None,
product=True, remove_duplicates=False, mask_only=False,
unroll=False, labels=None, meta=False, save=False, **kwargs):
"""Fills in templates
Parameters
----------
templates : str, list, tuple, or dict
On leaves: templates with {tags}, which will be substituted for mapping in **kwargs
Can have {mask} tags, which will be replaced by a masked language model.
Other tags can be numbered for distinction, e.g. {person} and {person1} will be considered
separate tags, but both will use fill-ins for 'person'
nsamples : int
Number of samples
product : bool
If true, take cartesian product
remove_duplicates : bool
If True, will not generate any strings where two or more fill-in values are duplicates.
mask_only : bool
If True, return only fill-in values for {mask} tokens
unroll : bool
If True, returns list of strings regardless of template type (i.e. unrolls)
labels : int or object with strings on leaves
If int, all generated strings will have the same label. Otherwise, can refer
to tags, or be strings, etc. Output will be in ret.meta
meta : bool
If True, ret.meta will contain a dict of fill in values for each item in ret.data
save : bool
If True, ret.templates will contain all parameters and fill-in lists
**kwargs
Must include fill-in lists for every tag not in editor.lexicons
Returns
-------
MunchWithAdd
Returns ret, a glorified dict, which will have the filled in templates in ret.data.
It may contain ret.labels, ret.templates and ret.meta (depending on parameters as noted above)
You can add or += two MunchWithAdd, which will concatenate values
"""
# 1. go through object, find every attribute inside brackets
# 2. check if they are in kwargs and self.attributes
# 3. generate keys and vals
# 4. go through object, generate
params = locals()
ret = MunchWithAdd()
del params['kwargs']
del params['self']
templates = copy.deepcopy(templates)
added_labels = False
if labels is not None and type(labels) != int:
added_labels = True
templates = (templates, labels)
all_keys = find_all_keys(templates)
items = self._get_fillin_items(all_keys, **kwargs)
mask_index, mask_options = get_mask_index(templates)
for mask, strings in mask_index.items():
# ks = {re.sub(r'.*?:', '', a): '{%s}' % a for a in all_keys}
ks = {}
tok = 'VERYLONGTOKENTHATWILLNOTEXISTEVER'
ks[mask] = tok
a_tok = 'thisisaratherlongtokenthatwillnotexist'
# print(mask)
# print('options:', mask_options[mask])
top = 100
find_top = re.search(r't(\d+)', mask_options[mask])
if find_top:
top = int(find_top.group(1))
sub_a = lambda x: re.sub(r'{[^:}]*a[^:}]*:(%s)}' % mask, r'{%s} {\1}' % a_tok, x)
# print(strings)
strings = recursive_apply(strings, sub_a)
ks[a_tok] = '{%s}' % a_tok
# print(strings)
ts = recursive_format(strings, ks, ignore_missing=True)
samp = self.template(ts, nsamples=5, remove_duplicates=remove_duplicates,
thisisaratherlongtokenthatwillnotexist=['a'], **kwargs).data
samp += self.template(ts, nsamples=5, remove_duplicates=remove_duplicates,
thisisaratherlongtokenthatwillnotexist=['an'], **kwargs).data
# print(samp)
# print(len([x for x in samp if ' an ' in x[0]]))
samp = [x.replace(tok, self.tg.tokenizer.mask_token) for y in samp for x in y][:20]
samp = list(set(samp))
# print(samp)
if 'beam_size' not in kwargs:
kwargs['beam_size'] = 100
# beam_size = kwargs.get('beam_size', 100)
# kwargs.
options = self.tg.unmask_multiple(samp, **kwargs)
# print(options)
# print(top)
v = [x[0] for x in options][:top]
items[mask] = v
if mask_only:
return options[:nsamples]
if save:
ret.templates = [(params, items)]
templates = recursive_apply(templates, replace_mask)
# print(templates)
keys = [x[0] for x in items.items()]
vals = [[x[1]] if type(x[1]) not in [list, tuple] else x[1] for x in items.items()]
if nsamples is not None:
# v = [np.random.choice(x, nsamples) for x in vals]
v = [wrapped_random_choice(x, nsamples) for x in vals]
if not v:
vals = [[]]
else:
vals = zip(*v)
# print(list(vals))
else:
if not product:
vals = zip(*vals)
else:
vals = itertools.product(*vals)
data = []
use_meta = meta
meta = []
for v in vals:
# print(v)
if remove_duplicates and len(v) != len(set([str(x) for x in v])):
continue
mapping = dict(zip(keys, v))
# print(templates)
# print(mapping)
data.append(recursive_format(templates, mapping))
meta.append(mapping)
if unroll and data and type(data[0]) in [list, np.array, np.ndarray, tuple]:
meta = [z for y, z in zip(data, meta) for x in y]
data = [x for y in data for x in y]
if use_meta:
ret.meta = meta
if added_labels:
data, labels = map(list, zip(*data))
ret.labels = labels
if labels is not None and type(labels) == int:
ret.labels = [labels for _ in range(len(data))]
ret.data = data
return ret
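# Usage sketch (hypothetical fill-in lists):
#     ret = editor.template('{p} is {a:adj} person.', p=['Ana', 'Bo'],
#                           adj=['honest', 'kind'], labels=1, meta=True)
#     ret.data   # ['Ana is an honest person.', ...] -- cartesian product, 4 strings
#     ret.labels # [1, 1, 1, 1]
#     ret.meta   # [{'p': 'Ana', 'adj': 'honest'}, ...]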
| 29,139 | 37.041775 | 150 | py |
checklist | checklist-master/checklist/expect.py | import numpy as np
import itertools
def iter_with_optional(data, preds, confs, labels, meta, idxs=None):
# If this is a single example
if type(data) not in [list, np.array, np.ndarray]:
return [(data, preds, confs, labels, meta)]
if type(meta) not in [list, np.array, np.ndarray]:
meta = itertools.repeat(meta)
else:
if len(meta) != len(data):
raise(Exception('If meta is list, length must match data'))
if type(labels) not in [list, np.array, np.ndarray]:
labels = itertools.repeat(labels)
else:
if len(labels) != len(data):
raise(Exception('If labels is list, length must match data'))
ret = zip(data, preds, confs, labels, meta)
if idxs is not None:
ret = list(ret)
ret = [ret[i] for i in idxs]
return ret
class Expect:
"""Helpers for writing expectation functions over tests.
Each test has a list of testcases, and each testcase has a list of examples.
Expectation function will act on whole tests, testcases, individual examples, or pairs of examples.
In any of these, the output of an expectation function for a single example
is an integer, float, bool, or None, where:
> 0 (or True) means passed,
<= 0 or False means fail, and (optionally) the magnitude of the
failure, indicated by distance from 0, e.g. -10 is worse than -1
None means the test does not apply, and this should not be counted
"""
@staticmethod
def test(fn):
"""Expectation over a whole test
Parameters
----------
fn : function
Arguments: (data, preds, confs, labels=None, meta=None), all of
which are potentially lists of lists
Returns: list of np.arrays, representing results for
examples inside a testcase. See docstring for the Expect class
for what different values in the output mean.
Returns
-------
function
Arguments: AbstractTest
Returns: List of np.arrays
"""
def expect(self):
return fn(self.data, self.results.preds, self.results.confs, self.labels, self.meta, self.run_idxs)
return expect
@staticmethod
def testcase(fn):
"""Expectation over a single testcase (may have multiple examples)
Parameters
----------
fn : function
Arguments: (xs, preds, confs, labels=None, meta=None)
Returns: np.array, representing results for the examples inside the
testcase. See docstring for the Expect class for what different
values in the output mean.
Returns
-------
function
Arguments: AbstractTest
Returns: List of np.arrays
"""
def expect(self):
zipped = iter_with_optional(self.data, self.results.preds, self.results.confs, self.labels, self.meta, self.run_idxs)
return [fn(x, pred, confs, labels, meta) for x, pred, confs, labels, meta in zipped]
return expect
@staticmethod
def single(fn):
"""Expectation over a single example
Parameters
----------
fn : function
Arguments: (x, pred, conf, label=None, meta=None)
Returns: bool, float, or int. See docstring for the Expect class
for what different values in the output mean.
Returns
-------
function
Arguments: AbstractTest
Returns: List of np.arrays
"""
def expect_fn(xs, preds, confs, label=None, meta=None):
return np.array([fn(x, p, c, l, m) for x, p, c, l, m in iter_with_optional(xs, preds, confs, label, meta)])
return Expect.testcase(expect_fn)
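# Usage sketch: an expectation that the prediction matches the label:
#     exact_match = Expect.single(
#         lambda x, pred, conf, label=None, meta=None: pred == label)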
@staticmethod
def pairwise(fn):
"""Expectation over pairs of examples, suitable for perturbation tests
Parameters
----------
fn : function
Arguments: (orig_pred, pred, orig_conf, conf, labels=None, meta=None)
Orig_pred and orig_conf are the prediction and the confidence
of the first example in the test case
Returns: bool, float, or int. See docstring for the Expect class
for what different values in the output mean.
Returns
-------
function
Arguments: AbstractTest
Returns: List of np.arrays
"""
def expect_fn(xs, preds, confs, labels=None, meta=None):
orig_pred = preds[0]
orig_conf = confs[0]
return np.array([fn(orig_pred, p, orig_conf, c, l, m) for _, p, c, l, m in iter_with_optional(xs, preds, confs, labels, meta)] )
return Expect.testcase(expect_fn)
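# Usage sketch: an expectation that perturbed predictions match the original:
#     same_pred = Expect.pairwise(
#         lambda orig_pred, pred, orig_conf, conf, labels=None, meta=None: pred == orig_pred)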
@staticmethod
def aggregate(data, agg_fn='all'):
"""aggregates expectation results for all examples in each test case
Parameters
----------
data : list(np.array)
list of np.arrays, one per testcase
agg_fn : function or string in 'all', 'all_except_first'
Arguments: np.array
Returns: bool, float, or int. See docstring for the Expect class
for what different values in the output mean.
Returns
-------
np.array
Of bool, float, or int. See docstring for the Expect class
for what different values in the output mean.
"""
# data is a list of lists or list of np.arrays
return np.array([Expect.aggregate_testcase(x, agg_fn) for x in data])
@staticmethod
def aggregate_testcase(expect_results, agg_fn='all'):
"""See docstring for aggregate"""
if agg_fn == 'all':
agg_fn = Expect.all()
if agg_fn == 'all_except_first':
agg_fn = Expect.all(ignore_first=True)
if expect_results is None:
return None
r = [x for x in expect_results if x is not None]
if not r:
return None
else:
return agg_fn(np.array(r))
@staticmethod
def all(ignore_first=False):
"""Aggregate such that all have to be True
See docstring for "aggregate", this is an aggregation function
Parameters
----------
ignore_first : bool
If True, do not require first example to be True (useful for perturbation tests)
Returns
-------
function
aggregation function
"""
def tmp_fn(results):
if ignore_first:
results = results[1:]
return np.all(results > 0)
return tmp_fn
@staticmethod
def wrap_slice(expect_fn, slice_fn, agg_fn='all'):
"""Wraps an expectation function with a slice function to discard certain testcases.
Parameters
----------
expect_fn : function
an expectation function
slice_fn : function
A slice function, slices testcases.
Arguments: the same as the expectation function
Returns: np.array where True means 'keep' and False means 'discard'
agg_fn : function
Aggregates examples within a test case. See aggregate_testcase
Returns
-------
function
The expect function, but now returning None for discarded examples
"""
def wrapped(*args, **kwargs):
ret = expect_fn(*args, **kwargs)
sliced = Expect.aggregate(slice_fn(*args, **kwargs), agg_fn)
for i in np.where(sliced != True)[0]:
if type(ret[i]) in [list, np.array, np.ndarray]:
ret[i] = [None for _ in ret[i]]
else:
ret[i] = None
return ret
return wrapped
@staticmethod
def slice_testcase(expect_fn, slice_fn, agg_fn='all'):
"""Wraps an expectation function with a slice function to discard certain testcases.
Slice function acts on testcase.
Parameters
----------
expect_fn : function
an expectation function, where argument is a Test
slice_fn : function
A slice function, slices testcases.
Arguments: (xs, preds, confs, labels=None, meta=None)
Returns: np.array where True means 'keep' and False means 'discard'
agg_fn : function
Aggregates examples within a test case. See aggregate_testcase
Returns
-------
function
The expect function, but now returning None for discarded examples
"""
wrapped_slice = Expect.testcase(slice_fn)
return Expect.wrap_slice(expect_fn, wrapped_slice, agg_fn)
@staticmethod
def slice_single(expect_fn, slice_fn, agg_fn='all'):
"""Wraps an expectation function with a slice function to discard certain testcases.
Slice function acts on single examples.
Parameters
----------
expect_fn : function
an expectation function, where argument is a Test
slice_fn : function
A slice function, slices testcases.
Arguments: (x, pred, conf, label=None, meta=None)
Returns: True ('keep') or False ('discard')
agg_fn : function
Aggregates examples within a test case. See aggregate_testcase
Returns
-------
function
The expect function, but now returning None for discarded examples
"""
wrapped_slice = Expect.single(slice_fn)
return Expect.wrap_slice(expect_fn, wrapped_slice, agg_fn)
@staticmethod
def slice_orig(expect_fn, slice_fn, agg_fn='all'):
"""Wraps an expectation function with a slice function to discard certain testcases.
Slice function acts on the original example in a perturbation test.
Parameters
----------
expect_fn : function
an expectation function, where argument is a Test
slice_fn : function
A slice function, slices original examples for perturbation tests.
Arguments: (orig_pred, orig_conf)
Returns: True ('keep') or False ('discard')
agg_fn : function
Aggregates examples within a test case. See aggregate_testcase
Returns
-------
function
The expect function, but now returning None for discarded examples
"""
new_fn = lambda orig, pred, *args, **kwargs: slice_fn(orig, pred)
return Expect.slice_pairwise(expect_fn, new_fn, agg_fn)
@staticmethod
def slice_pairwise(expect_fn, slice_fn, agg_fn='all_except_first'):
"""Wraps an expectation function with a slice function to discard certain testcases.
Slice function acts on pairs.
Parameters
----------
expect_fn : function
an expectation function, where argument is a Test
slice_fn : function
A slice function, slices testcases.
Arguments: (orig_pred, pred, orig_conf, conf, labels=None, meta=None)
Returns: np.array where True means 'keep' and False means 'discard'
agg_fn : function
Aggregates examples within a test case. See aggregate_testcase
Returns
-------
function
The expect function, but now returning None for discarded examples
"""
wrapped_slice = Expect.pairwise(slice_fn)
return Expect.wrap_slice(expect_fn, wrapped_slice, agg_fn)
@staticmethod
def combine(expect_fn1, expect_fn2, combine_fn, ignore_none=True):
"""Creates a wrapper that combines two expectation functions
Parameters
----------
expect_fn1 : function
an expectation function, where argument is a Test
expect_fn2 : function
an expectation function, where argument is a Test
combine_fn : function
Arguments: (x1, x2), the output of (expect_fn1, expect_fn2)
Returns: bool, float, or int. See docstring for the Expect class
for what different values in the output mean.
ignore_none : bool
If True, will take x1 if x2 is None and vice versa. If both are Nones,
will return None without calling combine_fn.
Returns
-------
function
wrapped expectation function
"""
# each expect_fn takes 'self' as input (i.e. wrapped by Expect.test or Expect.testcase)
# combine_fn takes (x1, x2), where each is an output from expect_fn1 or
# 2 (a single example within a testcase, which is a float, a bool, or
# None) and combines them into a float, a bool, or None if
# ignore_none=True, will take one of the inputs if the other is None
# without passing them to the combine_fn (and return None if both are
# Nones. otherwise, combine_fn must handle Nones)
def tmp_fn(self):
e1 = expect_fn1(self)
e2 = expect_fn2(self)
ret = []
for list1, list2 in zip(e1, e2):
r = []
for z1, z2 in zip(list1, list2):
if ignore_none:
if z1 is None:
r.append(z2)
continue
elif z2 is None:
r.append(z1)
continue
r.append(combine_fn(z1, z2))
ret.append(np.array(r))
return ret
return tmp_fn
@staticmethod
def combine_and(expect_fn1, expect_fn2):
"""Combines two expectation functions with the 'and' function
See 'combine' for more details.
"""
def combine_fn(x1, x2):
return min(x1, x2)
return Expect.combine(expect_fn1, expect_fn2, combine_fn)
@staticmethod
def combine_or(expect_fn1, expect_fn2):
"""Combines two expectation functions with the 'or' function
See 'combine' for more details.
"""
def combine_fn(x1, x2):
return max(x1, x2)
return Expect.combine(expect_fn1, expect_fn2, combine_fn)
# SAMPLE EXPECTATION FUNCTION
@staticmethod
def eq(val=None):
"""Expect predictions to be equal to a value.
See documentation for Expect.single
Parameters
----------
val : whatever or None
If None, expect prediction to be equal to label. Otherwise, to be equal to val
Returns
-------
function
an expectation function
"""
def ret_fn(x, pred, conf, label=None, meta=None):
gt = val if val is not None else label
softmax = type(conf) in [np.array, np.ndarray]
conf = conf[gt] if softmax else -conf
conf_viol = -(1 - conf)
if pred == gt:
return True
else:
return conf_viol
return Expect.single(ret_fn)
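# Usage sketch:
#     Expect.eq()  # prediction must equal the testcase label
#     Expect.eq(1) # prediction must equal 1; with softmax confidences a
#                  # failure scores -(1 - conf[1])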
@staticmethod
def inv(tolerance=0):
"""Expect predictions not to change, with a tolerance threshold
See documentation for Expect.pairwise.
Parameters
----------
tolerance : float
If prediction changes but prediction probability is within the tolerance,
will not consider it a failure.
Returns
-------
function
an expectation function
"""
def expect(orig_pred, pred, orig_conf, conf, labels=None, meta=None):
softmax = type(orig_conf) in [np.array, np.ndarray]
try:
if pred == orig_pred:
return True
except ValueError: # np.array output
if (pred == orig_pred).all():
return True
if softmax:
orig_conf = orig_conf[orig_pred]
conf = conf[orig_pred]
if np.abs(conf - orig_conf) <= tolerance:
return True
else:
return -np.abs(conf - orig_conf)
else:
# This is being generous I think
if conf + orig_conf <= tolerance:
return True
else:
return -(conf + orig_conf)
return Expect.pairwise(expect)
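# Usage sketch: a prediction flip whose probability (on the original
# prediction's label) moved by at most 0.1 still counts as a pass:
#     expect_fn = Expect.inv(tolerance=0.1)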
@staticmethod
def monotonic(label=None, increasing=True, tolerance=0.):
"""Expect predictions to be monotonic
See documentation for Expect.pairwise.
Parameters
----------
label : None or integer (only allowed if conf is softmax)
If None, the original prediction label
increasing : bool
Whether we want monotonically increasing or decreasing
tolerance : float
If confidence goes down (up) for monotonically increasing
(decreasing) by less than tolerance, will not be considered a failure.
Returns
-------
function
an expectation function
"""
keep_label = label
def expect(orig_pred, pred, orig_conf, conf, labels=None, meta=None):
label = keep_label
softmax = type(orig_conf) in [np.array, np.ndarray]
if not softmax and label is not None:
raise(Exception('Need prediction function to be softmax for monotonic if you specify label'))
if label is None:
label = orig_pred
if softmax:
orig_conf = orig_conf[label]
conf = conf[label]
conf_diff = conf - orig_conf
else:
if pred == orig_pred:
conf_diff = conf - orig_conf
else:
conf_diff = -(orig_conf + conf)
# can't fail
if increasing and orig_conf <= tolerance:
return None
if not increasing and orig_conf >= 1 - tolerance:
return None
if increasing:
if conf_diff + tolerance >= 0:
return True
else:
return conf_diff + tolerance
# return conf + tolerance >= orig_conf
else:
if conf_diff - tolerance <= 0:
return True
else:
return -(conf_diff - tolerance)
# return conf - tolerance <= orig_conf
return Expect.pairwise(expect)
| 18,549 | 35.089494 | 140 | py |
checklist | checklist-master/checklist/multilingual.py | import collections
from iso639 import languages
def get_language_code(language):
to_try = [languages.name, languages.inverted, languages.part1]
l_to_try = [language.capitalize(), language.lower()]
for l in l_to_try:
for t in to_try:
if l in t:
if not t[l].part1:
continue
return t[l].part1
raise Exception('Language %s not recognized. Try the iso-639 code.' % language)
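# Usage sketch (codes resolved through the iso639 tables):
#     get_language_code('Portuguese')  # -> 'pt'
#     get_language_code('pt')          # -> 'pt'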
def multilingual_params(language, **kwargs):
language_code = get_language_code(language)
lang_model = collections.defaultdict(lambda: 'xlm-roberta-large')
lang_model['fr'] = 'flaubert/flaubert_base_cased'
lang_model['en'] = 'roberta-base'
lang_model['de'] = 'bert-base-german-cased'
prefixes = {
'af': 'Hierdie teks is in Afrikaans geskryf. ',
'sq': 'Ky tekst është shkruar në shqip. ',
'am': 'ይህ ጽሑፍ በአማርኛ ተጽ writtenል ፡፡ ',
'ar': 'هذا النص مكتوب بالعربية. ',
'hy': 'Այս տեքստը գրված է հայերեն: ',
'az': 'Bu mətn Azərbaycan dilində yazılmışdır. ',
'eu': 'Testu hau euskaraz idatzita dago. ',
'be': 'Гэты тэкст напісаны па-беларуску. ',
'bn': 'এই লেখাটি বাংলা ভাষায় রচিত;। ',
'bs': 'Ovaj tekst je napisan na bosanskom jeziku. ',
'br': 'Ce texte est écrit en breton. ',
'bg': 'Този текст е написан на български език. ',
'my': 'ဒီစာသားကိုဗမာလိုရေးထားတယ်။ ',
'ca': 'Aquest text està escrit en català ;. ',
'zh': '这段文字是用中文写的。',
'hr': 'Ovaj tekst je napisan na hrvatskom jeziku. ',
'cs': 'Tento text je psán česky. ',
'da': 'Denne tekst er skrevet på dansk. ',
'nl': 'Deze tekst is geschreven in het Nederlands ;. ',
'eo': 'This text is written in Esperanto. ',
'et': 'See tekst on kirjutatud eesti keeles. ',
'fi': 'Tämä teksti on kirjoitettu suomeksi. ',
'gl': 'Este texto está escrito en galego. ',
'ka': 'ეს ტექსტი ქართულად არის დაწერილი. ',
'el': 'Αυτό το κείμενο είναι γραμμένο στα Ελληνικά. ',
'gu': 'આ લખાણ ગુજરાતીમાં લખાયેલ છે. ',
'ha': 'An rubuta wannan rubutun cikin harshen Hausa. ',
'he': 'טקסט זה כתוב בעברית. ',
'hi': 'यह पाठ हिंदी में लिखा गया है। ',
'hu': 'Ez a szöveg magyarul készült. ',
'is': 'Þessi texti er skrifaður á íslensku. ',
'id': 'Teks ini ditulis dalam bahasa Indonesia. ',
'ga': 'Tá an téacs seo scríofa i nGaeilge. ',
'it': 'Questo testo è scritto in italiano. ',
'ja': 'このテキストは日本語で書かれています。 ',
'jv': 'Naskah iki ditulis nganggo basa jawa. ',
'kn': 'ಈ ಪಠ್ಯವನ್ನು ಕನ್ನಡದಲ್ಲಿ ಬರೆಯಲಾಗಿದೆ. ',
'kk': 'Бұл мәтін қазақ тілінде жазылған. ',
'km': 'អត្ថបទនេះត្រូវបានសរសេរនៅកណ្តាល។ ',
'ko': '이 텍스트는 한국어로 작성되었습니다. ',
'ku': 'Bu metin Kürtçe yazılmıştır. ',
'ky': 'Бул текст кыргыз тилинде жазылган;. ',
'lo': 'ບົດຂຽນນີ້ຂຽນເປັນພາສາລາວ. ',
'la': 'Questo testo è scritto in latino. ',
'lv': 'Šis teksts ir uzrakstīts latviešu valodā. ',
'lt': 'Šis tekstas parašytas lietuvių kalba. ',
'mk': 'Овој текст е напишан на македонски јазик. ',
'mg': 'Ity soratra ity dia voasoratra amin\'ny teny malagasy. ',
'ms': 'Teks ini ditulis dalam bahasa Melayu. ',
'ml': 'ഈ വാചകം മലയാളത്തിലാണ് എഴുതിയിരിക്കുന്നത്. ',
'mr': 'हा मजकूर मराठीत लिहिला आहे;. ',
'mn': 'Энэ текстийг монгол хэлээр бичсэн болно. ',
'ne': 'यो लेख नेपालीमा लेखिएको छ। ',
'no': 'Denne teksten er skrevet på norsk. ',
'ps': 'دا متن په پښتو ژبه لیکل شوی.. ',
'fa': 'این متن به زبان فارسی نوشته شده است ؛. ',
'pl': 'Ten tekst jest napisany w języku polskim. ',
'pt': 'Este texto está escrito em português. ',
'pa': 'ਇਹ ਪਾਠ ਪੰਜਾਬ ਵਿਚ ਲਿਖਿਆ ਗਿਆ ਹੈ;. ',
'ro': 'Acest text este scris în limba română ;. ',
'ru': 'Этот текст написан на русском языке. ',
'gd': 'Tha an teacsa seo sgrìobhte ann an Gàidhlig ;. ',
'sr': 'Овај текст је написан на српском. ',
'sd': 'اهو متن سنڌي ۾ لکيو وڃي ٿو. ',
'si': 'මෙම පා text ය සිංහල භාෂාවෙන් ලියා ඇත. ',
'sk': 'Tento text je v slovenskom jazyku. ',
'sl': 'To besedilo je napisano v slovenščini;. ',
'so': 'Qoraalkan wuxuu ku qoran yahay Afsoomaali. ',
'es': 'Este texto está escrito en español. ',
'su': 'Téks ieu ditulis dina basa Sunda. ',
'sw': 'Maandishi haya yameandikwa kwa kiswahili. ',
'sv': 'Denna text är skriven på svenska. ',
'ta': 'இந்த உரை தமிழில் எழுதப்பட்டுள்ளது. ',
'te': 'ఈ వచనం తెలుగులో వ్రాయబడింది. ',
'th': 'ข้อความนี้เขียนเป็นภาษาไทย ',
'tr': 'Bu metin Türkçe yazılmıştır. ',
'uk': 'Цей текст написаний українською мовою. ',
'ur': 'یہ عبارت اردو میں لکھی گئی ہے۔ ',
'ug': 'This text is written in Uighur;. ',
'uz': 'Ushbu matn o\'zbek tilida yozilgan. ',
'vi': 'Văn bản này được viết bằng tiếng Việt. ',
'cy': 'Mae\'r testun hwn wedi\'i ysgrifennu yn Gymraeg. ',
'xh': 'Lo mbhalo ubhalwe ngesiXhosa. ',
'yi': 'דער טעקסט איז געשריבן אויף ייִדיש. ',
}
params = {
'model_name': lang_model[language_code],
'prefix_sentence': prefixes.get(language_code, ''),
'allow_word_pieces': language_code in ['zh', 'ja', 'ko']
}
if language_code not in prefixes and language_code not in ['fr', 'en', 'de']:
raise Exception('Language %s not supported yet. Sorry!' % language)
params.update(**kwargs)
return params
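# Usage sketch; the result is fully determined by the tables above:
#     multilingual_params('portuguese')
#     # -> {'model_name': 'xlm-roberta-large',
#     #     'prefix_sentence': 'Este texto está escrito em português. ',
#     #     'allow_word_pieces': False}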
| 5,646 | 48.104348 | 83 | py |
checklist | checklist-master/checklist/pred_wrapper.py | import numpy as np
class PredictorWrapper:
@staticmethod
def wrap_softmax(softmax_fn):
"""Wraps softmax such that it outputs predictions and confidences
Parameters
----------
softmax_fn : fn
Takes lists of inputs, outputs softmax probabilities (2d np.array)
Returns
-------
function
wrapped prediction function, returns (preds, confs) instead of softmax
"""
def pred_and_conf(inputs):
confs = softmax_fn(inputs)
preds = np.argmax(confs, axis=1)
return preds, confs
pred_and_conf.conf = 'softmax'
return pred_and_conf
@staticmethod
def wrap_predict(predict_fn):
"""Wraps prediction functions to output predictions and a confidence score of 1
Parameters
----------
predict_fn : function
Outputs a list of predictions given inputs (strings, integers, whatever)
Returns
-------
function
wrapped prediction function, returns (preds, confs) such that confs is list of float(1)
"""
def pred_and_conf(inputs):
preds = predict_fn(inputs)
confs = np.ones(len(preds))
return preds, confs
return pred_and_conf
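# Usage sketch (hypothetical model exposing predict_proba):
#     wrapped = PredictorWrapper.wrap_softmax(model.predict_proba)
#     preds, confs = wrapped(['a sentence', 'another one'])
#     # preds: argmax labels; confs: the full softmax rows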
| 1,310 | 27.5 | 99 | py |
checklist | checklist-master/checklist/text_generation.py | from transformers import AutoTokenizer, AutoModelForMaskedLM
import collections
import itertools
import numpy as np
import re
from transformers import GPT2Config
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from tqdm.auto import tqdm
import torch
import torch.nn.functional as F
from pattern.en import wordnet, pluralize
import requests
import json
def all_synsets(word, pos=None):
pos_map = {
'NOUN': wordnet.NOUN,
'VERB': wordnet.VERB,
'ADJ': wordnet.ADJECTIVE,
'ADV': wordnet.ADVERB
}
if pos is None:
pos_list = [wordnet.VERB, wordnet.ADJECTIVE, wordnet.NOUN, wordnet.ADVERB]
else:
pos_list = [pos_map[pos]]
ret = []
for pos in pos_list:
ret.extend(wordnet.synsets(word, pos=pos))
return ret
def clean_senses(synsets):
return [x for x in set(synsets) if '_' not in x]
def all_possible_synonyms(word, pos=None):
ret = []
for syn in all_synsets(word, pos=pos):
# if syn.synonyms[0] != word:
# continue
ret.extend(syn.senses)
return clean_senses(ret)
def all_possible_antonyms(word, pos=None):
ret = []
for syn in all_synsets(word, pos=pos):
if not syn.antonym:
continue
for s in syn.antonym:
ret.extend(s.senses)
return clean_senses(ret)
def all_possible_hypernyms(word, pos=None, depth=None):
ret = []
for syn in all_synsets(word, pos=pos):
ret.extend([y for x in syn.hypernyms(recursive=True, depth=depth) for y in x.senses])
return clean_senses(ret)
def all_possible_hyponyms(word, pos=None, depth=None):
ret = []
for syn in all_synsets(word, pos=pos):
ret.extend([y for x in syn.hyponyms(recursive=True, depth=depth) for y in x.senses])
return clean_senses(ret)
def all_possible_related(words, pos=None, depth=1):
all_syns = [y for word in words for y in all_synsets(word, pos=pos)]
# all_syns = [all_synsets(x, pos=pos) for x in words]
# all_syns = [x[0] for x in all_syns if x]
# return all_syns
# print(all_syns)
all_ancestors = [wordnet.ancestor(s1, s2) for s1, s2 in itertools.combinations(all_syns, 2)]
all_ancestors = [x for x in all_ancestors if x]
# print(all_ancestors)
mapz = {x.lexname: x for x in all_ancestors}
all_ancestors = list(mapz.values())
all_descendents = [y for x in all_ancestors for y in x.hyponyms(recursive=True, depth=depth)]
ret = [y for x in all_descendents for y in x.senses]
return clean_senses(ret)
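# Usage sketch (outputs come from pattern.en's WordNet and may vary by version):
#     all_possible_antonyms('good', pos='ADJ')   # e.g. ['bad', 'evil', ...]
#     all_possible_hypernyms('dog', pos='NOUN')  # e.g. ['animal', ...]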
class TextGenerator(object):
def __init__(self, url=None, model_name='roberta-base', prefix_sentence='', allow_word_pieces=False, **kwargs):
self.url = url
if url is None:
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# self.tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
# self.model = BertForMaskedLM.from_pretrained('bert-base-cased')
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
self.model = AutoModelForMaskedLM.from_pretrained(model_name)
self.model.to(self.device)
self.model.eval()
self.prefix_sentence = prefix_sentence
self.prefix_len = len(self.tokenizer.encode(prefix_sentence, add_special_tokens=False))
self.allow_word_pieces = allow_word_pieces
tmp = self.tokenizer.tokenize(' a')[0].split('a')
assert len(tmp) == 2
assert tmp[1] == ''
self.space_prefix = tmp[0]
if not self.allow_word_pieces:
self.with_space = torch.tensor(np.array(list(set([i for x, i in self.tokenizer.get_vocab().items() if x.startswith(self.space_prefix)]))), device=self.device)
self.with_space_set = set(self.with_space.cpu().numpy())
self.special_chars = set([i for x, i in self.tokenizer.get_vocab().items() if not x.strip(self.space_prefix).isalnum()])
def unmask_multiple(self, texts, beam_size=500, candidates=None, metric='avg', **kwargs):
rets = []
for text in texts:
rets.append(self.unmask(text, beam_size, candidates))
scores = collections.defaultdict(lambda: 0.) if metric == 'avg' else collections.defaultdict(lambda: 999999999)
count = collections.defaultdict(lambda: 0.)
examples = {}
longest = max([len(x[0][0]) for x in rets])
rets = sorted(rets, key=lambda x:len(x[0][0]), reverse=True)
for r in rets:
for x in r:
tup = tuple(x[0])
if len(tup) != longest:
tups = [k for k in scores if tuple(k[:len(tup)]) == tup]
else:
tups = [tup]
for tup in tups:
count[tup] += 1
examples[tup] = x[1]
if metric == 'avg':
scores[tup] += x[-1]
elif metric == 'min':
scores[tup] = min(scores[tup], x[-1])
if metric == 'min':
for x in count:
# print(x, count[x])
if count[x] != len(texts):
scores[x] = -999999
else:
for x in scores:
scores[x] = scores[x] / len(texts)
scores = sorted(scores.items(), key=lambda x:x[1], reverse=True)
return [(list(x[0]), examples[x[0]], x[1]) for x in scores]
def unmask(self, text_with_mask, beam_size=10, candidates=None):
if self.url is not None:
params = {'text': text_with_mask, 'beam_size': beam_size, 'candidates': candidates}
r = requests.post(url='%s/unmask' % self.url, data={'params': json.dumps(params)})
r = [tuple(x) for x in json.loads(r.text)]
return r
tokenizer = self.tokenizer
model = self.model
encoded = np.array(tokenizer.encode(self.prefix_sentence + text_with_mask, add_special_tokens=True))
cands = []
if candidates is not None:
candidates = candidates + [self.space_prefix + x for x in candidates]
cands = tokenizer.convert_tokens_to_ids(candidates)
if self.allow_word_pieces:
cands_with_space = list(set(cands))
else:
cands_with_space = list(set(cands).intersection(self.with_space_set))
if not len(cands_with_space):
return []
input_ids = torch.tensor(encoded)
# toks = tokenizer.tokenize('[CLS] %s [SEP]' % string)
current_beam= [([], 0)]
masked = (input_ids == self.tokenizer.mask_token_id).numpy().nonzero()[0]
# print(masked)
while len(current_beam[0][0]) != masked.shape[0]:
current_beam = current_beam[:beam_size]
size = len(current_beam[0][0])
to_pred = []
new_beam = []
for i, current in enumerate(current_beam):
idxs = current[0]
c = encoded.copy()
c[masked[:len(idxs)]] = idxs
to_pred.append(c)
# print('ae')
# print('\n'.join([tokenizer.decode(x) for x in to_pred]))
# print()
to_pred = torch.tensor(to_pred, device=self.device).to(torch.int64)
with torch.no_grad():
outputs = model(to_pred)[0]
for i, current in enumerate(current_beam):
prev = int(to_pred[i][masked[size] - 1])
forbid = False
# allow tokens that don't start with space if previous is not alphanumeric
if not self.allow_word_pieces and prev not in self.special_chars:
forbid = True
# print('Forbid Prev, current', prev, tokenizer.decode(to_pred[i][masked[size] - 1:masked[size]+1]))
if candidates is not None:
cands_to_use = cands_with_space if forbid else cands
scores = [outputs[i, masked[size], j] for j in cands_to_use]
new = [(current[0] + [int(x[0])], float(x[1]) + current[1]) for x in zip(cands_to_use, scores)]
else:
if forbid:
v, top_preds = torch.topk(outputs[i, masked[size], self.with_space.to(torch.int64)], beam_size + 10)
top_preds = self.with_space[top_preds]
else:
v, top_preds = torch.topk(outputs[i, masked[size]], beam_size + 10)
new = [(current[0] + [int(x[0])], float(x[1]) + current[1]) for x in zip(top_preds, v)]
new_beam.extend(new)
current_beam = sorted(new_beam, key=lambda x:x[1], reverse=True)
ret = []
ret_text = []
cop = encoded.copy()
for idxs, score in current_beam:
# words = tokenizer.convert_ids_to_tokens(idxs)
words = [str(tokenizer.decode([i])).strip() for i in idxs]
cop[masked] = idxs
text = tokenizer.decode(cop[1 + self.prefix_len:-1])
ret.append((words, text, score / masked.shape[0]))
ret = sorted(ret, key=lambda x:x[2], reverse=True)
return ret
def fill_in_between(self, pieces, beam_size=10, candidates=None):
text = ''
for p in pieces[:-1]:
text += p
text += ' ' + self.tokenizer.mask_token
if p != '':
text += ' '
text += pieces[-1]
if pieces[-1] == '':
text = text.rstrip()
return self.unmask(text, beam_size=beam_size, candidates=candidates)
def replace_word(self, text, word, threshold=5, beam_size=100, candidates=None):
masked = re.sub(r'\b%s\b' % re.escape(word), self.tokenizer.mask_token, text)
if masked == text:
return []
if candidates is not None:
candidates = [word] + candidates
ret = self.unmask(masked, beam_size=beam_size, candidates=candidates)
non_word = [x for x in ret if np.all([y not in [self.tokenizer.unk_token, word] for y in x[0]])]
score = [x for x in ret if np.all([y in [word, self.tokenizer.unk_token] for y in x[0]])]
if not score:
score = 0
else:
score = score[0][-1]
escaped = re.escape(word)
# new_ret = [(x[0], x[1], score - x[2]) for x in non_word if score - x[2] < threshold]
try:
new_ret = [(x[0], re.sub(r'\b%s\b' % escaped, x[0][0], text), score - x[2]) for x in non_word if score - x[2] < threshold]
except Exception: # re.sub can fail on replacement tokens containing escape characters
new_ret = [(x[0], x[1], score - x[2]) for x in non_word if score - x[2] < threshold]
return new_ret
def more_general(self, texts, word, threshold=5, pos=None, **kwargs):
options = all_possible_hypernyms(word, pos=pos)
# print(options)
return self.filter_options(texts, word, options, threshold)
def more_specific(self, texts, word, threshold=5, depth=3, pos=None, **kwargs):
options = all_possible_hyponyms(word, depth=depth, pos=pos)
return self.filter_options(texts, word, options, threshold)
def related_words(self, texts, words, threshold=5, depth=3, pos=None, **kwargs):
if type(words) != list:
words = [words]
if len(words) == 1:
options = all_possible_hypernyms(words[0], pos=pos)
ancestors = [x[0][0] for x in self.filter_options(texts, words[0], options, threshold)]
# print(ancestors)
options = list(set([y for x in ancestors for y in all_possible_hyponyms(x, depth=depth)]))
else:
options = all_possible_related(words, depth=depth)
return self.filter_options(texts, words[0], options, threshold)
def antonyms(self, texts, word, threshold=5, pos=None, **kwargs):
options = all_possible_antonyms(word, pos=pos)
return self.filter_options(texts, word, options, threshold)
def synonyms(self, texts, word, threshold=5, pos=None, **kwargs):
options = all_possible_synonyms(word, pos=pos)
# print(options)
return self.filter_options(texts, word, options, threshold)
def filter_options(self, texts, word, options, threshold=5):
if type(texts) != list:
texts = [texts]
options = options + [word]
in_all = set(options)
orig_ret = []
for text in texts:
masked = re.sub(r'\b%s\b' % re.escape(word), self.tokenizer.mask_token, text)
if masked == text:
continue
ret = self.unmask(masked, beam_size=100, candidates=options)
if not ret:
in_all = in_all.intersection(set())
continue
non_word = [x for x in ret if np.all([y not in [self.tokenizer.unk_token, word] for y in x[0]])]
score = [x for x in ret if np.all([y in [word, self.tokenizer.unk_token] for y in x[0]])]
if score:
score = score[0][-1]
# this will happen when the word is not in the vocabulary, in which case we don't look at the score
else:
score = 0
new_ret = [(x[0], x[1], score - x[2]) for x in non_word if score - x[2] < threshold]
# print(text)
# print(new_ret)
# print()
if text == texts[0]:
orig_ret = new_ret
in_all = in_all.intersection(set([x[0][0] for x in new_ret]))
return [x for x in orig_ret if x[0][0] in in_all]
def antonym(self, text, word, threshold=5, synonym=False):
options = all_possible_antonyms(word)
if synonym:
options = all_possible_synonyms(word)
if not options:
return []
options = options + [word]
masked = re.sub(r'\b%s\b' % re.escape(word), self.tokenizer.mask_token, text) # use the tokenizer's mask token ('[MASK]' is BERT-specific)
if masked == text:
return []
ret = self.unmask(masked, beam_size=100000000, candidates=options)
non_word = [x for x in ret if np.all([y not in [self.tokenizer.unk_token, word] for y in x[0]])]
score = [x for x in ret if np.all([y in [word, self.tokenizer.unk_token] for y in x[0]])][0][-1]
new_ret = [(x[0], x[1], score - x[2]) for x in non_word if score - x[2] < threshold]
return new_ret
def try_all_antonyms(self, text, threshold=5, synonym=False):
if self.url is not None:
params = {'text': text }
r = requests.post(url='%s/tokenize' % self.url, data={'params': json.dumps(params)})
words = json.loads(r.text)
else:
words = self.tokenizer.tokenize(text)
new_ret = []
for word in words:
word = word.strip(self.space_prefix)
try:
if synonym:
ret = self.synonyms(text, word, threshold)
else:
ret = self.antonyms(text, word, threshold)
except Exception:
print('Error', word)
print()
continue
new_ret.extend(ret)
return sorted(new_ret, key=lambda x:x[2])
| 15,163 | 44.951515 | 175 | py |
checklist | checklist-master/checklist/viewer/viewer.py | from .template_editor import TemplateEditor
from .test_summarizer import TestSummarizer
from .suite_summarizer import SuiteSummarizer | 133 | 43.666667 | 45 | py |
checklist | checklist-master/checklist/viewer/template_editor.py | import ipywidgets as widgets
from traitlets import Unicode, List, Dict
import os
import typing
import itertools
try:
from IPython.core.display import display, Javascript
except:
raise Exception("This module must be run in IPython.")
DIRECTORY = os.path.abspath(os.path.dirname(__file__))
# import logging
# logging.basicConfig(level=logging.INFO)
# logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@widgets.register
class TemplateEditor(widgets.DOMWidget):
"""An example widget."""
_view_name = Unicode('TemplateEditorView').tag(sync=True)
_model_name = Unicode('TemplateEditorModel').tag(sync=True)
_view_module = Unicode('viewer').tag(sync=True)
_model_module = Unicode('viewer').tag(sync=True)
_view_module_version = Unicode('^0.1.0').tag(sync=True)
_model_module_version = Unicode('^0.1.0').tag(sync=True)
templates = List([], help="The template list, with tags and masks.").tag(sync=True)
bert_suggests = List([], help="The BERT suggestion list").tag(sync=True)
def __init__(self, \
template_strs: typing.List[str], \
tagged_keys: typing.List[str], \
tag_dict: typing.Dict[str, str], \
mask_suggests: typing.List[typing.Union[str, tuple]], \
format_fn: typing.Callable, \
select_suggests_fn: typing.Callable, \
tokenizer, \
**kwargs):
widgets.DOMWidget.__init__(self, **kwargs)
self.format_fn = format_fn
self.select_suggests_fn = select_suggests_fn
# ONLY do tokenization here
self.tokenizer = tokenizer
self.bert_suggests = mask_suggests
self.templates = [
self.tokenize_template_str(s, tagged_keys, tag_dict) for \
s in template_strs]
self.on_msg(self.handle_events)
def tokenize_template_str(self, template_str, tagged_keys, tag_dict, max_count=5):
tagged_keys = list(tagged_keys)
trans_keys = ["{" + key + "}" for key in tagged_keys]
item_keys = [x[0] for x in tag_dict.items()]
item_vals = [[x[1][:max_count]] if type(x[1]) not in [list, tuple] else x[1][:max_count] for x in tag_dict.items()]
local_items = []
for idx, key in enumerate(tagged_keys):
self.tokenizer.add_tokens(trans_keys[idx])
for item_val in itertools.product(*item_vals):
if len(item_val) != len(set([str(x) for x in item_val])):
continue
local_item = {item_keys[i]: item_val[i] for i, _ in enumerate(item_val)}
local_items.append(local_item)
def _tokenize(text):
tokens = [self.tokenizer.decode(x) for x in self.tokenizer.encode(text, add_special_tokens=False)]
return [t for t in tokens if t]
def get_meta(text):
if text in trans_keys:
idx = trans_keys.index(text)
norm = tagged_keys[idx]
lemma = norm.split(":")[-1]
normalized_key = lemma.split('[')[0].split('.')[0]
texts = list()
for local_item in local_items:
try:
texts.append(self.format_fn(["{" + lemma +"}"], local_item)[0])
except:
pass
return (texts, norm, normalized_key)
else:
return text
template_tokens = [get_meta(t) for t in _tokenize(template_str)]
return template_tokens
def handle_events(self, _, content, buffers):
"""
Event handler. Users trigger python functions through the frontend interaction.
"""
if content.get('event', '') == 'select_suggests':
idxes = content.get("idxes", [])
selected_suggests = [self.bert_suggests[i] for i in idxes]
if self.select_suggests_fn:
self.select_suggests_fn(selected_suggests)
def render(self):
"""
Customized renderer. Directly load the bundled index.
"""
display(Javascript(open(os.path.join(DIRECTORY, 'static', 'index.js')).read()))
| 4,125 | 39.058252 | 123 | py |
checklist | checklist-master/checklist/viewer/fake_data.py | tag_dict = {'pos_adj': 'good', 'air_noun': 'flight', 'intens': 'very'}
raw_templates = [
['It', 'is', ['good', 'a:pos_adj'], ['flight', 'air_noun'], '.'],
['It', ['', 'a:mask'], ['very', 'a:intens'], ['good', 'pos_adj'], ['', 'mask'],'.']
]
suggests = [
['was', 'day'],
['been', 'day'],
['been', 'week'],
['was', 'time'],
['been', 'year'],
['was', 'experience'],
['been', 'weekend'],
['was', 'moment'],
['s', 'day'],
['was', 'game']
]
raw_testcases = [
{
"examples": [{
"new": {
"tokens": [
["Who", "is", "taller", ",", "Mary", "or", "Heather", "?"],
["Who", "is", "taller", ",", "Heather", "or", "Mary", "?"]
],
"pred": "1",
"conf": 0.7
},
"old": None,
"label": "1",
"succeed": 0,
}],
"tags": ["person1=Mary", "person2=Heather", "comparative=taller"],
"succeed": 0,
}, {
"examples": [{
"new": {
"tokens": [
["Who", "is", "taller", ",", "Mary", "or", "Heather", "?"],
["Who", "is", "taller", ",", "Heather", "or", "Mary", "?"]
],
"pred": "1",
"conf": 0.7
},
"old": None,
"label": "1",
"succeed": 1,
}, {
"new": {
"tokens": [
["Who", "is", "cooler", ",", "Mary", "or", "Heather", "?"],
["Who", "is", "cooler", ",", "Heather", "or", "Mary", "?"]
],
"pred": "1",
"conf": 0.7
},
"old": None,
"label": "1",
"succeed": 1,
}],
"tags": ["person1=Mary", "person2=Heather", "comparative=taller", "comparative=cooler"],
"succeed": 1,
}, {
"examples": [{
"new": {
"tokens": [
["Who", "is", "taller", ",", "Mary", "or", "Heather", "?"],
["Who", "is", "taller", ",", "Heather", "or", "Mary", "?"]
],
"pred": "0",
"conf": 0.9
},
"old": {
"tokens": [
["Who", "is", "taller", ",", "Mary", "or", "Heather", "?"],
["Who", "is", "taller", ",", "Mary", "or", "Heather", "?"]
],
"pred": "1",
"conf": 0.7
},
"succeed": 0,
"label": None,
}, {
"new": {
"tokens": [
["Who", "is", "cooler", ",", "Mary", "or", "Heather", "?"],
["Who", "is", "cooler", ",", "Heather", "or", "Mary", "?"]
],
"pred": "1",
"conf": 0.7
},
"old": {
"tokens": [
["Who", "is", "cooler", ",", "Mary", "or", "Heather", "?"],
["Who", "is", "cooler", ",", "Mary", "or", "Heather", "?"]
],
"pred": "0",
"conf": 0.8
},
"label": None,
"succeed": 0,
}],
"succeed": 0,
"tags": ["person1=Mary", "person2=Heather", "comparative=cooler", "comparative=taller"]
}
]
raw_testresult = {
"name": "Change the PERSON order",
"type": "inv",
"expect_meta": {"expected": "equal"},
"tags": [
"person1=Mary",
"person2=Heather",
"person2=Marco",
"comparative=cooler",
"comparative=taller"
],
"stats": {"nfailed": 10, "npassed": 20, "nfiltered": 20}
}
| 3,770 | 29.658537 | 96 | py |
checklist | checklist-master/checklist/viewer/suite_summarizer.py | import ipywidgets as widgets
from traitlets import Unicode, List, Dict
import os
import typing
from spacy.lang.en import English
from copy import deepcopy
try:
from IPython.core.display import display, Javascript
except:
raise Exception("This module must be run in IPython.")
DIRECTORY = os.path.abspath(os.path.dirname(__file__))
from .test_summarizer import TestSummarizer
@widgets.register
class SuiteSummarizer(TestSummarizer):
"""An testcase widget."""
_view_name = Unicode('SuiteSummarizerView').tag(sync=True)
_model_name = Unicode('SuiteSummarizerModel').tag(sync=True)
_view_module = Unicode('viewer').tag(sync=True)
_model_module = Unicode('viewer').tag(sync=True)
_view_module_version = Unicode('^0.1.0').tag(sync=True)
_model_module_version = Unicode('^0.1.0').tag(sync=True)
test_infos = List([]).tag(sync=True)
def __init__(self,
test_infos: typing.Dict,
select_test_fn: typing.Callable, \
**kwargs):
TestSummarizer.__init__(self, test_summary=None, testcases=[], **kwargs)
self.test_infos = test_infos
self.select_test_fn = select_test_fn
self.on_msg(self.handle_events)
def handle_events(self, _, content, buffers):
"""
Event handler. Users trigger python functions through the frontend interaction.
"""
if content.get('event', '') == 'apply_filter':
filter_tags = content.get("filter_tags", [])
is_fail_case = content.get("filter_fail_case", [])
self.search(filter_tags, is_fail_case)
elif content.get('event', '') == 'fetch_example':
self.fetch_example()
elif content.get('event', '') == 'switch_test':
testname = content.get("testname", "")
self.on_select_test(testname)
def on_select_test(self, testname: str) -> None:
if not self.select_test_fn:
summary, testcases = None, []
else:
summary, testcases = self.select_test_fn(testname)
self.reset_summary(summary)
self.reset_testcases(testcases) | 2,105 | 36.607143 | 87 | py |
CQMaxwell | CQMaxwell-main/RKRefErrorDatadelta10.py | import bempp.api
import numpy as np
import math
from RKconv_op import *
print("Bempp version used : " + bempp.api.__version__)
def create_timepoints(c,N,T):
m=len(c)
time_points=np.zeros((1,m*N))
for j in range(m):
time_points[0,j:m*N:m]=c[j]*1.0/N*np.ones((1,N))+np.linspace(0,1-1.0/N,N)
return T*time_points
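# Worked example: for the RadauIIA-2 nodes c=(1/3,1), N=2 steps and T=1,
# create_timepoints(np.array([1.0/3,1]),2,1) returns [[1/6, 1/2, 2/3, 1]],
# i.e. the stage times of both steps interleaved in time order.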
def create_rhs(grid,dx,N,T,m):
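# Assembles the space-time boundary data: tangential traces of the incident
# Gaussian plane-wave pulse and of its curl at every Runge-Kutta stage time,
# then applies the CQ time integral and the impedance symbol to the curl part.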
# grid=bempp.api.shapes.cube(h=1)
OrderQF = 8
#tol= np.finfo(float).eps
bempp.api.global_parameters.quadrature.near.max_rel_dist = 2
bempp.api.global_parameters.quadrature.near.single_order =OrderQF-1
bempp.api.global_parameters.quadrature.near.double_order = OrderQF-1
bempp.api.global_parameters.quadrature.medium.max_rel_dist =4
bempp.api.global_parameters.quadrature.medium.single_order =OrderQF-2
bempp.api.global_parameters.quadrature.medium.double_order =OrderQF-2
bempp.api.global_parameters.quadrature.far.single_order =OrderQF-3
bempp.api.global_parameters.quadrature.far.double_order =OrderQF-3
bempp.api.global_parameters.quadrature.double_singular = OrderQF
bempp.api.global_parameters.hmat.eps=10**-4
bempp.api.global_parameters.hmat.admissibility='strong'
if (m==2):
c_RK=np.array([1.0/3,1])
if (m==3):
c_RK=np.array([2.0/5-math.sqrt(6)/10,2.0/5+math.sqrt(6)/10,1])
from bempp.api.operators.boundary import maxwell
from bempp.api.operators.boundary import sparse
# multitrace = maxwell.multitrace_operator(grid, 1)
NC_space = bempp.api.function_space(grid,"NC",0)
RT_space = bempp.api.function_space(grid,"RT",0)
#curl_space = bempp.api.function_space(grid, "RBC", 0)
BC_space=bempp.api.function_space(grid, "BC",0)
SNC_space=bempp.api.function_space(grid, "SNC",0)
BRWG_space=bempp.api.function_space(grid, "B-RWG",0)
# div_space=bempp.api.function_space(grid, "B-RWG",0)
RBC_space=bempp.api.function_space(grid,"RBC",0)
# curl_space=bempp.api.function_space(grid,"RBC",0)
#from bempp.api.operators.boundary.sparse import identity as ident
# id1 = ident(div_space,div_space,curl_space).weak_form()
# print("CONDITION NUMBER : ", np.linalg.cond(bempp.api.as_matrix(id1).todense()))
dof=RT_space.global_dof_count
dof1=NC_space.global_dof_count
print(" DOF: ", dof)
rhs=np.zeros((dof+dof,N*m))
curls=np.zeros((dof,N*m))
time_points=create_timepoints(c_RK,N,T)
for j in range(m*N):
t=time_points[0,j]
def incident_field(x):
return np.array([np.exp(-50*(x[2]-t+2)**2), 0. * x[2], 0. * x[2]])
#return np.array([np.exp(-200*(x[2]-t+2)**2), 0. * x[2], 0. * x[2]])
def tangential_trace(x, n, domain_index, result):
result[:] = np.cross(n,np.cross(incident_field(x), n))
def curl_trace(x,n,domain_index,result):
curlU=np.array([ 0. * x[2],-100*(x[2]-t+2)*np.exp(-50*(x[2]-t+2)**2), 0. * x[2]])
result[:] = np.cross(curlU , n)
curl_fun = bempp.api.GridFunction(RT_space, fun=curl_trace,dual_space=RT_space)
trace_fun= bempp.api.GridFunction(RT_space, fun=tangential_trace,dual_space=RT_space)
rhs[0:dof,j]=trace_fun.coefficients
curlCoeffs=curl_fun.coefficients
if np.linalg.norm(curlCoeffs)>10**-9:
curls[0:dof,j]=curlCoeffs
#print("RHS NORM :", np.linalg.norm(trace_fun.coefficients))
def sinv(s,b):
return s**(-1)*b
IntegralOperator=Conv_Operator(sinv)
def HarmonicImpedance(s,b):
return 10*s**(0.5)*b
TimeImpedance=Conv_Operator(HarmonicImpedance)
if (m==2):
curls=IntegralOperator.apply_RKconvol(curls,T,method="RadauIIA-2",show_progress=False)
ZptNeuTrace=TimeImpedance.apply_RKconvol(curls,T,method="RadauIIA-2",show_progress=False)
if (m==3):
curls=IntegralOperator.apply_RKconvol(curls,T,method="RadauIIA-3",show_progress=False)
ZptNeuTrace=TimeImpedance.apply_RKconvol(curls,T,method="RadauIIA-3",show_progress=False)
rhs[0:dof,:]=np.real(ZptNeuTrace)-rhs[0:dof,:]
return rhs
def harmonic_calderon(s,b,grid):
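# Laplace-domain solve at frequency s: assembles the boundary block system
# (electric/magnetic field operators plus the impedance term), solves it with
# GMRES, and evaluates the scattered field at the observation point (0,0,2).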
points=np.array([[0],[0],[2]])
#normb=np.linalg.norm(b[0])+np.linalg.norm(b[1])+np.linalg.norm(b[2])
normb=np.max(np.abs(b))
bound=np.abs(s)**4*np.exp(-s.real)*normb
print("s: ",s, " maxb: ", normb, " bound : ", bound)
if bound <10**(-9):
print("JUMPED")
return np.zeros(3)
OrderQF = 8
#tol= np.finfo(float).eps
bempp.api.global_parameters.quadrature.near.max_rel_dist = 2
bempp.api.global_parameters.quadrature.near.single_order =OrderQF-1
bempp.api.global_parameters.quadrature.near.double_order = OrderQF-1
bempp.api.global_parameters.quadrature.medium.max_rel_dist =4
bempp.api.global_parameters.quadrature.medium.single_order =OrderQF-2
bempp.api.global_parameters.quadrature.medium.double_order =OrderQF-2
bempp.api.global_parameters.quadrature.far.single_order =OrderQF-3
bempp.api.global_parameters.quadrature.far.double_order =OrderQF-3
bempp.api.global_parameters.quadrature.double_singular = OrderQF
bempp.api.global_parameters.hmat.eps=10**-4
bempp.api.global_parameters.hmat.admissibility='strong'
NC_space=bempp.api.function_space(grid, "NC",0)
RT_space=bempp.api.function_space(grid, "RT",0)
elec = -bempp.api.operators.boundary.maxwell.electric_field(RT_space, RT_space, NC_space,1j*s)
magn = -bempp.api.operators.boundary.maxwell.magnetic_field(RT_space, RT_space, NC_space, 1j*s)
identity2=bempp.api.operators.boundary.sparse.identity(RT_space, RT_space, RT_space)
identity= -bempp.api.operators.boundary.sparse.identity(RT_space, RT_space, NC_space)
dof=NC_space.global_dof_count
trace_fun= bempp.api.GridFunction(RT_space, coefficients=b[0:dof],dual_space=RT_space)
normb=trace_fun.l2_norm()
bound=np.abs(s)**3*np.exp(-s.real)*normb
if bound <10**(-8):
print("JUMPED")
return np.zeros(3)
zero_fun= bempp.api.GridFunction(RT_space,coefficients = b[dof:],dual_space=RT_space)
#rhs=[trace_fun,zero_fun]
id_discrete=identity2.weak_form()
b[0:dof]=id_discrete*b[0:dof]
blocks=np.array([[None,None], [None,None]])
#blocks[0,0] = -elec.weak_form()+10*s**0.5*identity2.weak_form()
blocks[0,0] = -elec.weak_form()+10*s**0.5*identity2.weak_form()
blocks[0,1] = magn.weak_form()-1.0/2*identity.weak_form()
blocks[1,0] = -magn.weak_form()-1.0/2*identity.weak_form()
blocks[1,1] = -elec.weak_form()
blocks=bempp.api.BlockedDiscreteOperator(blocks)
# A_mat=bempp.api.as_matrix(blocks)
# print("A_mat : ",A_mat)
# e,D=np.linalg.eig(A_mat)
# print("Eigs : ", e)
# print("Cond : ", np.linalg.cond(A_mat))
##
## trace_fun= bempp.api.GridFunction(multitrace.range_spaces[0], coefficients=b[0:dof],dual_space=multitrace.dual_to_range_spaces[0])
##
## zero_fun= bempp.api.GridFunction(multitrace.range_spaces[1],coefficients = b[dof:],dual_space=multitrace.dual_to_range_spaces[1])
##
## rhs=[trace_fun,zero_fun]
##
## #print("Still living")
##
#from bempp.api.linalg import gmres
from scipy.sparse.linalg import gmres
print("Start GMRES : ")
# def print_res(rk):
# print("Norm of residual: "+ str(np.linalg.norm(rk)))
#print(np.linalg.norm(lambda_data))
#lambda_data,info = gmres(blocks, b,tol=10**-4,restart=50,maxiter=100,callback=print_res)
lambda_data,info = gmres(blocks, b,tol=10**-5,maxiter=300)
print("INFO :", info)
#lambda_data,info = gmres(blocks, b,tol=10**-4,callback=print_res)
#print("I survived!")
#from bempp.api.linalg import lu
#lambda_data = lu(elec, trace_fun)
#lambda_data.plot()
#print("Norm lambda_data : ",np.linalg.norm(lambda_data))
#if (np.linalg.norm(lambda_data)<10**-10):
phigrid=bempp.api.GridFunction(RT_space,coefficients=lambda_data[0:dof],dual_space=RT_space)
psigrid=bempp.api.GridFunction(RT_space,coefficients=lambda_data[dof:2*dof],dual_space=RT_space)
slp_pot = bempp.api.operators.potential.maxwell.electric_field(RT_space, points, s*1j)
dlp_pot = bempp.api.operators.potential.maxwell.magnetic_field(RT_space, points, s*1j)
print("Evaluate field : ")
scattered_field_data = -slp_pot * phigrid+dlp_pot*psigrid
# scattered_field_H = -slp_pot * psigrid-dlp_pot*phigrid
# H = scattered_field_H.reshape(3,1)[:,0]
# print(" E : ", E, " H : ", H)
# print("Angle: ", np.dot(E,H), " Scalar product with conjugation : ", np.dot(np.conj(E),H))
# print("NORM COMBINED OPERATOR :" , np.linalg.norm(scattered_field_data)/np.linalg.norm(b))
# print(scattered_field_data)
# print("NORM ScatteredField :", np.linalg.norm(scattered_field_data))
# print("s : ", s)
# print("NORM B :" ,np.linalg.norm(b))
if np.isnan(scattered_field_data).any():
print("NAN Warning, s = ", s)
scattered_field_data=np.zeros(np.shape(scattered_field_data))
return scattered_field_data.reshape(3,1)[:,0]
def scattering_solution(dx,N,T,m):
grid=bempp.api.shapes.sphere(h=dx)
rhs=create_rhs(grid,dx,N,T,m)
def ellipticSystem(s,b):
return harmonic_calderon(s,b,grid)
ScatOperator=Conv_Operator(ellipticSystem)
#num_sol=ScatOperator.apply_convol(rhs,T)
if (m==2):
num_solStages=ScatOperator.apply_RKconvol(rhs,T,cutoff=10**(-5),method="RadauIIA-2")
if (m==3):
num_solStages=ScatOperator.apply_RKconvol(rhs,T,cutoff=10**(-5),method="RadauIIA-3")
num_sol=np.zeros((len(num_solStages[:,0]),N+1))
num_sol[:,1:N+1]=np.real(num_solStages[:,m-1:N*m:m])
return num_sol
import time
T=6
#N_ref=2**4
N_ref=2**11
tt_ref=np.linspace(0,T,N_ref+1)
#dx_ref=np.sqrt(2)**(-4)
dx_ref=np.sqrt(2)**(-9)
m=3
import matplotlib.pyplot as plt
start=time.time()
#sol_ref=scattering_solution(dx_ref,N_ref,T)
#sol_ref2=scattering_solution(dx_ref,N_ref,T)
#tt=np.linspace(0,T,N_ref+1)
#plt.plot(tt,np.abs(sol_ref[0,:]))
#plt.plot(tt,np.abs(sol_ref[0,:]-sol_ref2[0,:]))
##plt.plot(tt,resc_ref[0,:],linestyle='dashed')
##plt.plot(tt,num_sol[0,:])
#plt.show()
sol_ref=scattering_solution(dx_ref,N_ref,T,m)
np.save("data/sol_ref_absorbing_delta10_N2h11_dxsqrt2m9RK5.npy",sol_ref)
#sol_ref=np.load("data/sol_ref_absorbing_delta0p1_N2h11_dxsqrt2m10RK5.npy")
#Current Reference solutions:
#np.save("data/sol_ref_absorbing_delta0p1_N212_dxsqrt2m9RK5.npy",sol_ref)
#np.save("data/sol_ref_absorbing_delta001_N212_dxsqrt2m9RK3.npy",sol_ref)
#sol_ref=np.load("data/sol_ref_absorbing_delta1_N212_dxsqrt2m9RK3.npy")
#sol_ref=np.load("data/sol_ref_absorbing_delta001_N212_dxsqrt2m9RK3.npy")
#sol_ref=np.load("data/sol_ref_absorbing_N212_dxsqrt2m7RK5.npy")
#import scipy.io
#scipy.io.loadmat('data/Err_data_delta1.mat')
#tt=np.linspace(0,T,N_ref+1)
#plt.plot(tt,sol_ref[0,:])
#plt.show()
#plt.plot(sol_ref[0,:]**2+sol_ref[1,:]**2+sol_ref[2,:]**2)
#plt.show()
Am_space=8
Am_time=7
#Am_space=1
#Am_time=8
tau_s=np.zeros(Am_time)
h_s=np.zeros(Am_space)
errors=np.zeros((Am_space,Am_time))
m=2
for ixSpace in range(Am_space):
for ixTime in range(Am_time):
N=8*2**(ixTime)
tau_s[ixTime]=T*1.0/N
tt=np.linspace(0,T,N+1)
dx=np.sqrt(2)**(-ixSpace)
h_s[ixSpace]=dx
		speed=N_ref//N  # integer subsampling ratio; used as an index stride below
resc_ref=np.zeros((3,N+1))
# resc_ref=sol_ref
for j in range(N+1):
resc_ref[:,j] = sol_ref[:,j*speed]
#num_sol = calc_ref_sol(N,dx,F_transfer)
num_sol = scattering_solution(dx,N,T,m)
# plt.plot(tt,num_sol[0,:]**2+num_sol[1,:]**2+num_sol[2,:]**2)
# plt.plot(tt_ref,sol_ref[0,:]**2+sol_ref[1,:]**2+sol_ref[2,:]**2,linestyle='dashed')
# plt.show()
errors[ixSpace,ixTime]=np.max(np.abs(resc_ref-num_sol))
print(errors)
import scipy.io
scipy.io.savemat('data/Err_data_delta10.mat', dict( ERR=errors,h_s=h_s,tau_s=tau_s))
#scipy.io.savemat('data/Err_data_delta0p1_long.mat', dict( ERR=errors,h_s=h_s,tau_s=tau_s))
end=time.time()
print("Script Runtime: "+str((end-start)/60) +" Min")
| 11,402 | 34.194444 | 133 | py |
CQMaxwell | CQMaxwell-main/libVersions.py | import bempp.api
import scipy
import numpy
import matplotlib
print("Bempp version :", bempp.api.__version__)
print("Scipy version :", scipy.__version__)
print("Numpy version :", numpy.__version__)
print("Matplotlib version :", matplotlib.__version__)
| 251 | 27 | 53 | py |
CQMaxwell | CQMaxwell-main/FramesAndConditions.py | import numpy as np
import bempp.api
import math
from RKconv_op import *
def create_timepoints(c,N,T):
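	# Stage times of an m-stage Runge-Kutta method with nodes c, tiled over
	# N uniform steps of size T/N and interleaved stage-by-stage.
	# Example: c = [1/3, 1], N = 2, T = 1 gives [1/6, 1/2, 2/3, 1].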
m=len(c)
time_points=np.zeros((1,m*N))
for j in range(m):
time_points[0,j:m*N:m]=c[j]*1.0/N*np.ones((1,N))+np.linspace(0,1-1.0/N,N)
return T*time_points
def create_rhs(grid,N,T,m):
#grid=bempp.api.shapes.sphere(h=dx)
if (m==2):
c_RK=np.array([1.0/3,1])
if (m==3):
c_RK=np.array([2.0/5-math.sqrt(6)/10,2.0/5+math.sqrt(6)/10,1])
from bempp.api.operators.boundary import maxwell
RT_space = bempp.api.function_space(grid,"RT",0)
# curl_space=bempp.api.function_space(grid,"RBC",0)
#from bempp.api.operators.boundary.sparse import identity as ident
# id1 = ident(div_space,div_space,curl_space).weak_form()
# print("CONDITION NUMBER : ", np.linalg.cond(bempp.api.as_matrix(id1).todense()))
dof=RT_space.global_dof_count
print(" DOF: ", dof)
rhs=np.zeros((dof+dof,N*m))
curls=np.zeros((dof,N*m))
time_points=create_timepoints(c_RK,N,T)
for j in range(m*N):
t=time_points[0,j]
def incident_field(x):
return np.array([np.exp(-50*(x[2]-t+1)**2), 0. * x[2], 0. * x[2]])
#return np.array([np.exp(-200*(x[2]-t+2)**2), 0. * x[2], 0. * x[2]])
def tangential_trace(x, n, domain_index, result):
result[:] = np.cross(n,np.cross(incident_field(x), n))
def curl_trace(x,n,domain_index,result):
curlU=np.array([ 0. * x[2],-100*(x[2]-t+1)*np.exp(-50*(x[2]-t+1)**2), 0. * x[2]])
result[:] = np.cross(curlU , n)
curl_fun = bempp.api.GridFunction(RT_space, fun=curl_trace,dual_space=RT_space)
trace_fun= bempp.api.GridFunction(RT_space, fun=tangential_trace,dual_space=RT_space)
rhs[0:dof,j]=trace_fun.coefficients
curlCoeffs=curl_fun.coefficients
if np.linalg.norm(curlCoeffs)>10**-9:
curls[0:dof,j]=curlCoeffs
#print("RHS NORM :", np.linalg.norm(trace_fun.coefficients))
def sinv(s,b):
return s**(-1)*b
IntegralOperator=Conv_Operator(sinv)
def HarmonicImpedance(s,b):
return 0.1*s**(0.5)*b
TimeImpedance=Conv_Operator(HarmonicImpedance)
if (m==2):
curls=IntegralOperator.apply_RKconvol(curls,T,method="RadauIIA-2",show_progress=False)
ZptNeuTrace=TimeImpedance.apply_RKconvol(curls,T,method="RadauIIA-2",show_progress=False)
if (m==3):
curls=IntegralOperator.apply_RKconvol(curls,T,method="RadauIIA-3",show_progress=False)
ZptNeuTrace=TimeImpedance.apply_RKconvol(curls,T,method="RadauIIA-3",show_progress=False)
rhs[0:dof,:]=np.real(ZptNeuTrace)-rhs[0:dof,:]
return rhs
def harmonic_calderon(s,b,grid,points):
OrderQF = 8
#tol= np.finfo(float).eps
bempp.api.global_parameters.quadrature.near.max_rel_dist = 2
bempp.api.global_parameters.quadrature.near.single_order =OrderQF-1
bempp.api.global_parameters.quadrature.near.double_order = OrderQF-1
bempp.api.global_parameters.quadrature.medium.max_rel_dist =4
bempp.api.global_parameters.quadrature.medium.single_order =OrderQF-2
bempp.api.global_parameters.quadrature.medium.double_order =OrderQF-2
bempp.api.global_parameters.quadrature.far.single_order =OrderQF-3
bempp.api.global_parameters.quadrature.far.double_order =OrderQF-3
bempp.api.global_parameters.quadrature.double_singular = OrderQF
bempp.api.global_parameters.hmat.eps=10**-4
bempp.api.global_parameters.hmat.admissibility='strong'
NC_space=bempp.api.function_space(grid, "NC",0)
RT_space=bempp.api.function_space(grid, "RT",0)
elec = -bempp.api.operators.boundary.maxwell.electric_field(RT_space, RT_space, NC_space,1j*s)
magn = -bempp.api.operators.boundary.maxwell.magnetic_field(RT_space, RT_space, NC_space, 1j*s)
identity2=bempp.api.operators.boundary.sparse.identity(RT_space, RT_space, RT_space)
identity= -bempp.api.operators.boundary.sparse.identity(RT_space, RT_space, NC_space)
dof=NC_space.global_dof_count
trace_fun= bempp.api.GridFunction(RT_space, coefficients=b[0:dof],dual_space=RT_space)
zero_fun= bempp.api.GridFunction(RT_space,coefficients = b[dof:],dual_space=RT_space)
#rhs=[trace_fun,zero_fun]
id_discrete=identity2.weak_form()
b[0:dof]=id_discrete*b[0:dof]
blocks=np.array([[None,None], [None,None]])
blocks[0,0] = -elec.weak_form()+0.1*s**0.5*identity2.weak_form()
blocks[0,1] = magn.weak_form()-1.0/2*identity.weak_form()
blocks[1,0] = -magn.weak_form()-1.0/2*identity.weak_form()
blocks[1,1] = -elec.weak_form()
blocks_discrete=bempp.api.BlockedDiscreteOperator(blocks)
from scipy.sparse.linalg import gmres
lambda_data,info= gmres(blocks_discrete, b)
#cond=np.linalg.cond(bempp.api.as_matrix(blocks_discrete))
print("System solved !")
	import scipy.io, scipy.linalg, time
mat_contents =scipy.io.loadmat('data/cond.mat')
freqCond_old = mat_contents['freqCond']
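	# Condition numbers are accumulated across calls in data/cond.mat, so
	# the SVD below is computed only once per Laplace frequency s.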
if s in freqCond_old[0]:
print("Frequency already calculated")
else:
tp0=time.time()
blocks_mat=bempp.api.as_matrix(blocks_discrete)
# tp4=time.time()
sigmas = scipy.linalg.svdvals(blocks_mat)
norminv = min(sigmas)**(-1)
normA = max(sigmas)
cond=normA*norminv
print("Freq: ",s ," Cond: ",cond)
# print(freqCond_old)
freqCond=np.concatenate((freqCond_old,np.array([[s],[cond],[normA],[norminv]])),axis=1)
scipy.io.savemat('data/cond.mat',dict(freqCond=freqCond))
#print(np.linalg.norm(lambda_data))
#print("I survived!")
#from bempp.api.linalg import lu
#lambda_data = lu(elec, trace_fun)
#lambda_data.plot()
#print("Norm lambda_data : ",np.linalg.norm(lambda_data))
#if (np.linalg.norm(lambda_data)<10**-10):
phigrid=bempp.api.GridFunction(RT_space,coefficients=lambda_data[0:dof],dual_space=RT_space)
psigrid=bempp.api.GridFunction(RT_space,coefficients=lambda_data[dof:2*dof],dual_space=RT_space)
# x_a=-0.75
# x_b=0.75
# y_a=-0.25
# y_b=1.25
##
# x_a=-2
# x_b=2
# y_a=-2
# y_b=2
# n_grid_points=150
# plot_grid = np.mgrid[y_a:y_b:1j*n_grid_points, x_a:x_b:1j*n_grid_points]
## plot_grid = np.mgrid[-0.5:1:1j*n_grid_points, -1.5:1.5:1j*n_grid_points]
# #print(plot_grid)
## points = np.vstack( ( plot_grid[0].ravel() , plot_grid[1].ravel() , 0.25*np.ones(plot_grid[0].size) ) )
# points = np.vstack( ( plot_grid[0].ravel() , 0*np.ones(plot_grid[0].size) , plot_grid[1].ravel()) )
#point=np.array([[0],[0],[2]])
slp_pot = bempp.api.operators.potential.maxwell.electric_field(RT_space, points, s*1j)
dlp_pot = bempp.api.operators.potential.maxwell.magnetic_field(RT_space, points, s*1j)
scattered_field_data = -slp_pot * phigrid+dlp_pot*psigrid
# print("NORM COMBINED OPERATOR :" , np.linalg.norm(scattered_field_data)/np.linalg.norm(b))
# print(scattered_field_data)
# print("NORM ScatteredField :", np.linalg.norm(scattered_field_data))
# print("s : ", s)
# print("NORM B :" ,np.linalg.norm(b))
if np.isnan(scattered_field_data).any():
print("NAN Warning",s)
print("NORM B :" ,np.linalg.norm(b))
return np.zeros(n_grid_points**2*3)
#print(scattered_field_data.reshape(3,1)[:,0])
return scattered_field_data.reshape(n_grid_points**2*3,1)[:,0]
def scattering_solution(dx,N,T,m,points):
import scipy.io
import numpy as np
mat_contents=scipy.io.loadmat('grids/TorusDOF896.mat')
Nodes=np.array(mat_contents['Nodes']).T
rawElements=mat_contents['Elements']
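	# Swap the first two nodes of every triangle to flip the element
	# orientation (the MATLAB mesh presumably uses the opposite normal
	# convention), then shift to 0-based indexing for bempp.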
for j in range(len(rawElements)):
betw=rawElements[j][0]
rawElements[j][0]=rawElements[j][1]
rawElements[j][1]=betw
Elements=np.array(rawElements).T
Elements=Elements-1
grid=bempp.api.grid_from_element_data(Nodes,Elements)
# def tangential_trace(x, n, domain_index, result):
# result[:] = n[1]
# P1_space = bempp.api.function_space(grid,"P",1)
# normal_fun = bempp.api.GridFunction(P1_space, fun=tangential_trace,dual_space=P1_space)
#normal_fun.plot()
#grid.plot()
#grid=bempp.api.shapes.sphere(h=dx)
rhs=create_rhs(grid,N,T,m)
def ellipticSystem(s,b):
return harmonic_calderon(s,b,grid,points)
ScatOperator=Conv_Operator(ellipticSystem)
#num_sol=ScatOperator.apply_convol(rhs,T)
if (m==2):
num_solStages=ScatOperator.apply_RKconvol(rhs,T,cutoff=10**(-16),method="RadauIIA-2")
if (m==3):
num_solStages=ScatOperator.apply_RKconvol(rhs,T,cutoff=10**(-16),method="RadauIIA-3")
num_sol=np.zeros((len(num_solStages[:,0]),N+1))
num_sol[:,1:N+1]=np.real(num_solStages[:,m-1:N*m:m])
return num_sol
import time
N=100
T=4
#Generate points
x_a=-1.5
x_b=1.5
y_a=-1.5
y_b=1.5
n_grid_points=300
nx=n_grid_points
nz=n_grid_points
#Initialize an empty file, which will be continuously overwritten with the condition numbers and the frequencies
freqCond=1j*np.array([[0],[0],[0],[0]])
import scipy.io
scipy.io.savemat('data/cond.mat',dict(freqCond=freqCond))
plot_grid = np.mgrid[y_a:y_b:1j*n_grid_points, x_a:x_b:1j*n_grid_points]
#plot_grid = np.mgrid[-0.5:1:1j*n_grid_points, -1.5:1.5:1j*n_grid_points]
#print(plot_grid)
points = np.vstack( ( plot_grid[0].ravel() , 0*np.ones(plot_grid[0].size) , plot_grid[1].ravel()) )
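# Observation points in the y = 0 plane; evals[:, j] below holds the three
# field components at all nx*nz points, flattened, for time step j.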
evals=scattering_solution(1,N,T,3,points)
u_ges=np.zeros((n_grid_points**2,N+1))
for j in range(N+1):
#matplotlib inline
import matplotlib
from matplotlib import pylab as plt
# Adjust the figure size in IPython
matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)
t=j*T*1.0/N
def incident_field(x):
return np.array([np.exp(-50*(x[2]-t+1)**2), 0. * x[2], 0. * x[2]])
incident_field_data = incident_field(points)
#scat_eval=np.zeros(nx*nz*3)
#incident_field_data[radius<1]=np.nan
scat_eval=evals[:,j].reshape(3,nx*nz)
# print(scat_eval)
field_data = scat_eval + incident_field_data
# field_data = scat_eval
# field_data = incident_field_data
# print("Points: ")
# print(points)
# print("Data: ")
#print(field_data)
squared_field_density = np.sum(field_data * field_data,axis = 0)
u_ges[:,j]=squared_field_density.T
#squared_field_density=field_data[2,:]
#squared_field_density[radius<1]=np.nan
#print("MAX FIELD DATA: " , max(squared_field_density))
print(max(np.abs(squared_field_density)))
plt.imshow(squared_field_density.reshape((nx, nz)).T,
cmap='coolwarm', origin='lower',
extent=[x_a, x_b, y_a, y_b])
if j==10:
plt.colorbar()
plt.clim((-1,1))
#plt.title("Squared Electric Field Density")
#plt.savefig("data/wave_images/Screen_n{}.png".format(j))
import scipy.io
scipy.io.savemat('data/delta01_dof896.mat',dict(u_ges=u_ges,N=N,T=T,plot_grid=plot_grid,points=points))
# tp1=time.time()
# print("Dense Matrix assembled, time : ", tp1-tp0)
# normA=np.linalg.norm(blocks_mat,ord=2)
# tp2=time.time()
# print("Norm A calculated, value ",normA, " time : ",tp2-tp1)
# cond=np.linalg.cond(blocks_mat)
# tp3=time.time()
# print("Cond A calculated, value ", cond," time : ",tp3-tp2)
# norminv=np.linalg.norm(np.linalg.inv(blocks_mat),ord=2)
# print("Inv A calculated, direct : ", norminv, " Previous estimate : ", cond/normA)
| 10,911 | 33.1 | 113 | py |
CQMaxwell | CQMaxwell-main/RKRefErrorDatadelta01.py | import bempp.api
import numpy as np
import math
from RKconv_op import *
print("Bempp version used : " + bempp.api.__version__)
def create_timepoints(c,N,T):
m=len(c)
time_points=np.zeros((1,m*N))
for j in range(m):
time_points[0,j:m*N:m]=c[j]*1.0/N*np.ones((1,N))+np.linspace(0,1-1.0/N,N)
return T*time_points
def create_rhs(grid,dx,N,T,m):
# grid=bempp.api.shapes.cube(h=1)
OrderQF = 8
#tol= np.finfo(float).eps
bempp.api.global_parameters.quadrature.near.max_rel_dist = 2
bempp.api.global_parameters.quadrature.near.single_order =OrderQF-1
bempp.api.global_parameters.quadrature.near.double_order = OrderQF-1
bempp.api.global_parameters.quadrature.medium.max_rel_dist =4
bempp.api.global_parameters.quadrature.medium.single_order =OrderQF-2
bempp.api.global_parameters.quadrature.medium.double_order =OrderQF-2
bempp.api.global_parameters.quadrature.far.single_order =OrderQF-3
bempp.api.global_parameters.quadrature.far.double_order =OrderQF-3
bempp.api.global_parameters.quadrature.double_singular = OrderQF
bempp.api.global_parameters.hmat.eps=10**-4
bempp.api.global_parameters.hmat.admissibility='strong'
if (m==2):
c_RK=np.array([1.0/3,1])
if (m==3):
c_RK=np.array([2.0/5-math.sqrt(6)/10,2.0/5+math.sqrt(6)/10,1])
from bempp.api.operators.boundary import maxwell
from bempp.api.operators.boundary import sparse
# multitrace = maxwell.multitrace_operator(grid, 1)
NC_space = bempp.api.function_space(grid,"NC",0)
RT_space = bempp.api.function_space(grid,"RT",0)
#curl_space = bempp.api.function_space(grid, "RBC", 0)
#BC_space=bempp.api.function_space(grid, "BC",0)
#SNC_space=bempp.api.function_space(grid, "SNC",0)
#BRWG_space=bempp.api.function_space(grid, "B-RWG",0)
# #div_space=bempp.api.function_space(grid, "B-RWG",0)
#RBC_space=bempp.api.function_space(grid,"RBC",0)
# curl_space=bempp.api.function_space(grid,"RBC",0)
#from bempp.api.operators.boundary.sparse import identity as ident
# id1 = ident(div_space,div_space,curl_space).weak_form()
# print("CONDITION NUMBER : ", np.linalg.cond(bempp.api.as_matrix(id1).todense()))
dof=RT_space.global_dof_count
dof1=NC_space.global_dof_count
print(" DOF: ", dof)
rhs=np.zeros((dof+dof,N*m))
curls=np.zeros((dof,N*m))
time_points=create_timepoints(c_RK,N,T)
for j in range(m*N):
t=time_points[0,j]
def incident_field(x):
return np.array([np.exp(-50*(x[2]-t+2)**2), 0. * x[2], 0. * x[2]])
#return np.array([np.exp(-200*(x[2]-t+2)**2), 0. * x[2], 0. * x[2]])
@bempp.api.real_callable
def tangential_trace(x, n, domain_index, result):
result[:] = np.cross(n,np.cross(incident_field(x), n))
@bempp.api.real_callable
def curl_trace(x,n,domain_index,result):
curlU=np.array([ 0. * x[2],-100*(x[2]-t+2)*np.exp(-50*(x[2]-t+2)**2), 0. * x[2]])
result[:] = np.cross(curlU , n)
curl_fun = bempp.api.GridFunction(RT_space, fun=curl_trace,dual_space=RT_space)
trace_fun= bempp.api.GridFunction(RT_space, fun=tangential_trace,dual_space=RT_space)
rhs[0:dof,j]=trace_fun.coefficients
curlCoeffs=curl_fun.coefficients
if np.linalg.norm(curlCoeffs)>10**-9:
curls[0:dof,j]=curlCoeffs
#print("RHS NORM :", np.linalg.norm(trace_fun.coefficients))
def sinv(s,b):
return s**(-1)*b
IntegralOperator=Conv_Operator(sinv)
def HarmonicImpedance(s,b):
return 0.1*s**(0.5)*b
TimeImpedance=Conv_Operator(HarmonicImpedance)
if (m==2):
curls=IntegralOperator.apply_RKconvol(curls,T,method="RadauIIA-2",show_progress=False)
ZptNeuTrace=TimeImpedance.apply_RKconvol(curls,T,method="RadauIIA-2",show_progress=False)
if (m==3):
curls=IntegralOperator.apply_RKconvol(curls,T,method="RadauIIA-3",show_progress=False)
ZptNeuTrace=TimeImpedance.apply_RKconvol(curls,T,method="RadauIIA-3",show_progress=False)
rhs[0:dof,:]=np.real(ZptNeuTrace)-rhs[0:dof,:]
return rhs
def harmonic_calderon(s,b,grid):
points=np.array([[0],[0],[2]])
#normb=np.linalg.norm(b[0])+np.linalg.norm(b[1])+np.linalg.norm(b[2])
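	# Heuristic a-priori bound: the transformed data decay exponentially in
	# Re(s), so frequencies whose contribution falls below the CQ cutoff are
	# skipped before any boundary operator is assembled.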
normb=np.max(np.abs(b))
bound=np.abs(s)**4*np.exp(-s.real)*normb
print("s: ",s, " maxb: ", normb, " bound : ", bound)
if bound <10**(-9):
print("JUMPED")
return np.zeros(3)
OrderQF = 8
#tol= np.finfo(float).eps
bempp.api.global_parameters.quadrature.near.max_rel_dist = 2
bempp.api.global_parameters.quadrature.near.single_order =OrderQF-1
bempp.api.global_parameters.quadrature.near.double_order = OrderQF-1
bempp.api.global_parameters.quadrature.medium.max_rel_dist =4
bempp.api.global_parameters.quadrature.medium.single_order =OrderQF-2
bempp.api.global_parameters.quadrature.medium.double_order =OrderQF-2
bempp.api.global_parameters.quadrature.far.single_order =OrderQF-3
bempp.api.global_parameters.quadrature.far.double_order =OrderQF-3
bempp.api.global_parameters.quadrature.double_singular = OrderQF
bempp.api.global_parameters.hmat.eps=10**-4
bempp.api.global_parameters.hmat.admissibility='strong'
NC_space=bempp.api.function_space(grid, "NC",0)
RT_space=bempp.api.function_space(grid, "RT",0)
elec = -bempp.api.operators.boundary.maxwell.electric_field(RT_space, RT_space, NC_space,1j*s)
magn = -bempp.api.operators.boundary.maxwell.magnetic_field(RT_space, RT_space, NC_space, 1j*s)
identity2=bempp.api.operators.boundary.sparse.identity(RT_space, RT_space, RT_space)
identity= -bempp.api.operators.boundary.sparse.identity(RT_space, RT_space, NC_space)
dof=NC_space.global_dof_count
trace_fun= bempp.api.GridFunction(RT_space, coefficients=b[0:dof],dual_space=RT_space)
normb=trace_fun.l2_norm()
bound=np.abs(s)**3*np.exp(-s.real)*normb
if bound <10**(-8):
print("JUMPED")
return np.zeros(3)
zero_fun= bempp.api.GridFunction(RT_space,coefficients = b[dof:],dual_space=RT_space)
#rhs=[trace_fun,zero_fun]
id_discrete=identity2.weak_form()
b[0:dof]=id_discrete*b[0:dof]
blocks=np.array([[None,None], [None,None]])
#blocks[0,0] = -elec.weak_form()+10*s**0.5*identity2.weak_form()
blocks[0,0] = -elec.weak_form()+0.1*s**0.5*identity2.weak_form()
blocks[0,1] = magn.weak_form()-1.0/2*identity.weak_form()
blocks[1,0] = -magn.weak_form()-1.0/2*identity.weak_form()
blocks[1,1] = -elec.weak_form()
blocks=bempp.api.BlockedDiscreteOperator(blocks)
# A_mat=bempp.api.as_matrix(blocks)
# print("A_mat : ",A_mat)
# e,D=np.linalg.eig(A_mat)
# print("Eigs : ", e)
# print("Cond : ", np.linalg.cond(A_mat))
##
## trace_fun= bempp.api.GridFunction(multitrace.range_spaces[0], coefficients=b[0:dof],dual_space=multitrace.dual_to_range_spaces[0])
##
## zero_fun= bempp.api.GridFunction(multitrace.range_spaces[1],coefficients = b[dof:],dual_space=multitrace.dual_to_range_spaces[1])
##
## rhs=[trace_fun,zero_fun]
##
## #print("Still living")
##
#from bempp.api.linalg import gmres
from scipy.sparse.linalg import gmres
print("Start GMRES : ")
# def print_res(rk):
# print("Norm of residual: "+ str(np.linalg.norm(rk)))
#print(np.linalg.norm(lambda_data))
#lambda_data,info = gmres(blocks, b,tol=10**-4,restart=50,maxiter=100,callback=print_res)
lambda_data,info = gmres(blocks, b,tol=10**-5,maxiter=300)
print("INFO :", info)
#lambda_data,info = gmres(blocks, b,tol=10**-4,callback=print_res)
#print("I survived!")
#from bempp.api.linalg import lu
#lambda_data = lu(elec, trace_fun)
#lambda_data.plot()
#print("Norm lambda_data : ",np.linalg.norm(lambda_data))
#if (np.linalg.norm(lambda_data)<10**-10):
phigrid=bempp.api.GridFunction(RT_space,coefficients=lambda_data[0:dof],dual_space=RT_space)
psigrid=bempp.api.GridFunction(RT_space,coefficients=lambda_data[dof:2*dof],dual_space=RT_space)
slp_pot = bempp.api.operators.potential.maxwell.electric_field(RT_space, points, s*1j)
dlp_pot = bempp.api.operators.potential.maxwell.magnetic_field(RT_space, points, s*1j)
print("Evaluate field : ")
scattered_field_data = -slp_pot * phigrid+dlp_pot*psigrid
# scattered_field_H = -slp_pot * psigrid-dlp_pot*phigrid
# H = scattered_field_H.reshape(3,1)[:,0]
# print(" E : ", E, " H : ", H)
# print("Angle: ", np.dot(E,H), " Scalar product with conjugation : ", np.dot(np.conj(E),H))
# print("NORM COMBINED OPERATOR :" , np.linalg.norm(scattered_field_data)/np.linalg.norm(b))
# print(scattered_field_data)
# print("NORM ScatteredField :", np.linalg.norm(scattered_field_data))
# print("s : ", s)
# print("NORM B :" ,np.linalg.norm(b))
if np.isnan(scattered_field_data).any():
print("NAN Warning, s = ", s)
scattered_field_data=np.zeros(np.shape(scattered_field_data))
return scattered_field_data.reshape(3,1)[:,0]
def scattering_solution(dx,N,T,m):
grid=bempp.api.shapes.sphere(h=dx)
rhs=create_rhs(grid,dx,N,T,m)
def ellipticSystem(s,b):
return harmonic_calderon(s,b,grid)
ScatOperator=Conv_Operator(ellipticSystem)
#num_sol=ScatOperator.apply_convol(rhs,T)
if (m==2):
num_solStages=ScatOperator.apply_RKconvol(rhs,T,cutoff=10**(-5),method="RadauIIA-2")
if (m==3):
num_solStages=ScatOperator.apply_RKconvol(rhs,T,cutoff=10**(-5),method="RadauIIA-3")
num_sol=np.zeros((len(num_solStages[:,0]),N+1))
num_sol[:,1:N+1]=np.real(num_solStages[:,m-1:N*m:m])
return num_sol
import time
T=6
#N_ref=2**4
N_ref=2**11
tt_ref=np.linspace(0,T,N_ref+1)
#dx_ref=np.sqrt(2)**(0)
dx_ref=np.sqrt(2)**(-9)
m=3
import matplotlib.pyplot as plt
start=time.time()
#sol_ref=scattering_solution(dx_ref,N_ref,T)
#sol_ref2=scattering_solution(dx_ref,N_ref,T)
#tt=np.linspace(0,T,N_ref+1)
#plt.plot(tt,np.abs(sol_ref[0,:]))
#plt.plot(tt,np.abs(sol_ref[0,:]-sol_ref2[0,:]))
##plt.plot(tt,resc_ref[0,:],linestyle='dashed')
##plt.plot(tt,num_sol[0,:])
#plt.show()
sol_ref=scattering_solution(dx_ref,N_ref,T,m)
np.save("data/sol_ref_absorbing_delta0p1_N2h11_dxsqrt2m9RK5.npy",sol_ref)
#sol_ref=np.load("data/sol_ref_absorbing_delta0p1_N2h11_dxsqrt2m10RK5.npy")
#Current Reference solutions:
#np.save("data/sol_ref_absorbing_delta0p1_N212_dxsqrt2m9RK5.npy",sol_ref)
#np.save("data/sol_ref_absorbing_delta001_N212_dxsqrt2m9RK3.npy",sol_ref)
#sol_ref=np.load("data/sol_ref_absorbing_delta1_N212_dxsqrt2m9RK3.npy")
#sol_ref=np.load("data/sol_ref_absorbing_delta001_N212_dxsqrt2m9RK3.npy")
#sol_ref=np.load("data/sol_ref_absorbing_N212_dxsqrt2m7RK5.npy")
#import scipy.io
#scipy.io.loadmat('data/Err_data_delta1.mat')
#tt=np.linspace(0,T,N_ref+1)
#plt.plot(tt,sol_ref[0,:])
#plt.show()
#plt.plot(sol_ref[0,:]**2+sol_ref[1,:]**2+sol_ref[2,:]**2)
#plt.show()
Am_space=8
Am_time=7
#Am_space=1
#Am_time=8
tau_s=np.zeros(Am_time)
h_s=np.zeros(Am_space)
errors=np.zeros((Am_space,Am_time))
m=2
for ixSpace in range(Am_space):
for ixTime in range(Am_time):
N=8*2**(ixTime)
tau_s[ixTime]=T*1.0/N
tt=np.linspace(0,T,N+1)
dx=np.sqrt(2)**(-ixSpace)
h_s[ixSpace]=dx
		speed=N_ref//N
resc_ref=np.zeros((3,N+1))
# resc_ref=sol_ref
for j in range(N+1):
resc_ref[:,j] = sol_ref[:,j*speed]
#num_sol = calc_ref_sol(N,dx,F_transfer)
num_sol = scattering_solution(dx,N,T,m)
# plt.plot(tt,num_sol[0,:]**2+num_sol[1,:]**2+num_sol[2,:]**2)
# plt.plot(tt_ref,sol_ref[0,:]**2+sol_ref[1,:]**2+sol_ref[2,:]**2,linestyle='dashed')
# plt.show()
errors[ixSpace,ixTime]=np.max(np.abs(resc_ref-num_sol))
print(errors)
import scipy.io
scipy.io.savemat('data/Err_data_delta01.mat', dict( ERR=errors,h_s=h_s,tau_s=tau_s))
#scipy.io.savemat('data/Err_data_delta0p1_long.mat', dict( ERR=errors,h_s=h_s,tau_s=tau_s))
end=time.time()
print("Script Runtime: "+str((end-start)/60) +" Min")
| 11,463 | 34.058104 | 133 | py |
CQMaxwell | CQMaxwell-main/d10RKRefErrorData.py | import bempp.api
import numpy as np
import math
from RKconv_op import *
def create_timepoints(c,N,T):
m=len(c)
time_points=np.zeros((1,m*N))
for j in range(m):
time_points[0,j:m*N:m]=c[j]*1.0/N*np.ones((1,N))+np.linspace(0,1-1.0/N,N)
return T*time_points
def create_rhs(grid,dx,N,T,m):
# grid=bempp.api.shapes.cube(h=1)
OrderQF = 7
#tol= np.finfo(float).eps
bempp.api.global_parameters.quadrature.near.max_rel_dist = 2
bempp.api.global_parameters.quadrature.near.single_order =OrderQF-1
bempp.api.global_parameters.quadrature.near.double_order = OrderQF-1
bempp.api.global_parameters.quadrature.medium.max_rel_dist =4
bempp.api.global_parameters.quadrature.medium.single_order =OrderQF-2
bempp.api.global_parameters.quadrature.medium.double_order =OrderQF-2
bempp.api.global_parameters.quadrature.far.single_order =OrderQF-3
bempp.api.global_parameters.quadrature.far.double_order =OrderQF-3
bempp.api.global_parameters.quadrature.double_singular = OrderQF
bempp.api.global_parameters.hmat.eps=10**-4
bempp.api.global_parameters.hmat.admissibility='strong'
if (m==2):
c_RK=np.array([1.0/3,1])
if (m==3):
c_RK=np.array([2.0/5-math.sqrt(6)/10,2.0/5+math.sqrt(6)/10,1])
from bempp.api.operators.boundary import maxwell
from bempp.api.operators.boundary import sparse
multitrace = maxwell.multitrace_operator(grid, 1)
RWG_space = bempp.api.function_space(grid,"RWG",0)
NC_space = bempp.api.function_space(grid,"NC",0)
RT_space = bempp.api.function_space(grid,"RT",0)
#curl_space = bempp.api.function_space(grid, "RBC", 0)
BC_space=bempp.api.function_space(grid, "BC",0)
SNC_space=bempp.api.function_space(grid, "SNC",0)
BRWG_space=bempp.api.function_space(grid, "B-RWG",0)
# div_space=bempp.api.function_space(grid, "B-RWG",0)
RBC_space=bempp.api.function_space(grid,"RBC",0)
# curl_space=bempp.api.function_space(grid,"RBC",0)
#from bempp.api.operators.boundary.sparse import identity as ident
# id1 = ident(div_space,div_space,curl_space).weak_form()
# print("CONDITION NUMBER : ", np.linalg.cond(bempp.api.as_matrix(id1).todense()))
dof=multitrace.range_spaces[0].global_dof_count
dof1=RBC_space.global_dof_count
print(" RWG: ", dof)
print(" RBC: ", dof1)
rhs=np.zeros((dof+dof,N*m))
curls=np.zeros((dof,N*m))
time_points=create_timepoints(c_RK,N,T)
for j in range(m*N):
t=time_points[0,j]
def incident_field(x):
return np.array([np.exp(-50*(x[2]-t+2)**2), 0. * x[2], 0. * x[2]])
#return np.array([np.exp(-200*(x[2]-t+2)**2), 0. * x[2], 0. * x[2]])
def tangential_trace(x, n, domain_index, result):
result[:] = np.cross(n,np.cross(incident_field(x), n))
def curl_trace(x,n,domain_index,result):
curlU=np.array([ 0. * x[2],-100*(x[2]-t+2)*np.exp(-50*(x[2]-t+2)**2), 0. * x[2]])
result[:] = np.cross(curlU , n)
curl_fun = bempp.api.GridFunction(RT_space, fun=curl_trace,dual_space=RT_space)
trace_fun= bempp.api.GridFunction(RT_space, fun=tangential_trace,dual_space=RT_space)
rhs[0:dof,j]=trace_fun.coefficients
curlCoeffs=curl_fun.coefficients
if np.linalg.norm(curlCoeffs)>10**-9:
curls[0:dof,j]=curlCoeffs
#print("RHS NORM :", np.linalg.norm(trace_fun.coefficients))
def sinv(s,b):
return s**(-1)*b
IntegralOperator=Conv_Operator(sinv)
def HarmonicImpedance(s,b):
return 10*s**(0.5)*b
TimeImpedance=Conv_Operator(HarmonicImpedance)
if (m==2):
curls=IntegralOperator.apply_RKconvol(curls,T,method="RadauIIA-2",show_progress=False)
ZptNeuTrace=TimeImpedance.apply_RKconvol(curls,T,method="RadauIIA-2",show_progress=False)
if (m==3):
curls=IntegralOperator.apply_RKconvol(curls,T,method="RadauIIA-3",show_progress=False)
ZptNeuTrace=TimeImpedance.apply_RKconvol(curls,T,method="RadauIIA-3",show_progress=False)
rhs[0:dof,:]=np.real(ZptNeuTrace)-rhs[0:dof,:]
return rhs
def harmonic_calderon(s,b,grid):
points=np.array([[0],[0],[2]])
#normb=np.linalg.norm(b[0])+np.linalg.norm(b[1])+np.linalg.norm(b[2])
normb=np.max(np.abs(b))
bound=np.abs(s)**3*np.exp(-s.real*2)*normb
print("s: ",s, " maxb: ", normb, " bound : ", bound)
if bound <10**(-5):
print("JUMPED")
return np.zeros(3)
if normb <10**(-6):
print("JUMPED")
return np.zeros(3)
OrderQF = 7
#tol= np.finfo(float).eps
bempp.api.global_parameters.quadrature.near.max_rel_dist = 2
bempp.api.global_parameters.quadrature.near.single_order =OrderQF-1
bempp.api.global_parameters.quadrature.near.double_order = OrderQF-1
bempp.api.global_parameters.quadrature.medium.max_rel_dist =4
bempp.api.global_parameters.quadrature.medium.single_order =OrderQF-2
bempp.api.global_parameters.quadrature.medium.double_order =OrderQF-2
bempp.api.global_parameters.quadrature.far.single_order =OrderQF-3
bempp.api.global_parameters.quadrature.far.double_order =OrderQF-3
bempp.api.global_parameters.quadrature.double_singular = OrderQF
bempp.api.global_parameters.hmat.eps=10**-3
bempp.api.global_parameters.hmat.admissibility='strong'
NC_space=bempp.api.function_space(grid, "NC",0)
RT_space=bempp.api.function_space(grid, "RT",0)
elec = -bempp.api.operators.boundary.maxwell.electric_field(RT_space, RT_space, NC_space,1j*s)
magn = -bempp.api.operators.boundary.maxwell.magnetic_field(RT_space, RT_space, NC_space, 1j*s)
identity2=bempp.api.operators.boundary.sparse.identity(RT_space, RT_space, RT_space)
identity= -bempp.api.operators.boundary.sparse.identity(RT_space, RT_space, NC_space)
dof=NC_space.global_dof_count
trace_fun= bempp.api.GridFunction(RT_space, coefficients=b[0:dof],dual_space=RT_space)
normb=trace_fun.l2_norm()
bound=np.abs(s)**3*np.exp(-2*s.real)*normb
if bound <10**(-3):
print("JUMPED")
return np.zeros(3)
zero_fun= bempp.api.GridFunction(RT_space,coefficients = b[dof:],dual_space=RT_space)
#rhs=[trace_fun,zero_fun]
id_discrete=identity2.weak_form()
b[0:dof]=id_discrete*b[0:dof]
blocks=np.array([[None,None], [None,None]])
#blocks[0,0] = -elec.weak_form()+10*s**0.5*identity2.weak_form()
blocks[0,0] = -elec.weak_form()+10*s**0.5*identity2.weak_form()
blocks[0,1] = magn.weak_form()-1.0/2*identity.weak_form()
blocks[1,0] = -magn.weak_form()-1.0/2*identity.weak_form()
blocks[1,1] = -elec.weak_form()
blocks=bempp.api.BlockedDiscreteOperator(blocks)
# A_mat=bempp.api.as_matrix(blocks)
# print("A_mat : ",A_mat)
# e,D=np.linalg.eig(A_mat)
# print("Eigs : ", e)
# print("Cond : ", np.linalg.cond(A_mat))
##
## trace_fun= bempp.api.GridFunction(multitrace.range_spaces[0], coefficients=b[0:dof],dual_space=multitrace.dual_to_range_spaces[0])
##
## zero_fun= bempp.api.GridFunction(multitrace.range_spaces[1],coefficients = b[dof:],dual_space=multitrace.dual_to_range_spaces[1])
##
## rhs=[trace_fun,zero_fun]
##
## #print("Still living")
##
#from bempp.api.linalg import gmres
from scipy.sparse.linalg import gmres
print("Start GMRES : ")
# def print_res(rk):
# print("Norm of residual: "+ str(np.linalg.norm(rk)))
#print(np.linalg.norm(lambda_data))
#lambda_data,info = gmres(blocks, b,tol=10**-4,restart=50,maxiter=100,callback=print_res)
lambda_data,info = gmres(blocks, b,tol=10**-4,maxiter=100)
print("INFO :", info)
#lambda_data,info = gmres(blocks, b,tol=10**-4,callback=print_res)
#print("I survived!")
#from bempp.api.linalg import lu
#lambda_data = lu(elec, trace_fun)
#lambda_data.plot()
#print("Norm lambda_data : ",np.linalg.norm(lambda_data))
#if (np.linalg.norm(lambda_data)<10**-10):
phigrid=bempp.api.GridFunction(RT_space,coefficients=lambda_data[0:dof],dual_space=RT_space)
psigrid=bempp.api.GridFunction(RT_space,coefficients=lambda_data[dof:2*dof],dual_space=RT_space)
slp_pot = bempp.api.operators.potential.maxwell.electric_field(RT_space, points, s*1j)
dlp_pot = bempp.api.operators.potential.maxwell.magnetic_field(RT_space, points, s*1j)
print("Evaluate field : ")
scattered_field_data = -slp_pot * phigrid+dlp_pot*psigrid
# scattered_field_H = -slp_pot * psigrid-dlp_pot*phigrid
# H = scattered_field_H.reshape(3,1)[:,0]
# print(" E : ", E, " H : ", H)
# print("Angle: ", np.dot(E,H), " Scalar product with conjugation : ", np.dot(np.conj(E),H))
# print("NORM COMBINED OPERATOR :" , np.linalg.norm(scattered_field_data)/np.linalg.norm(b))
# print(scattered_field_data)
# print("NORM ScatteredField :", np.linalg.norm(scattered_field_data))
# print("s : ", s)
# print("NORM B :" ,np.linalg.norm(b))
if np.isnan(scattered_field_data).any():
print("NAN Warning, s = ", s)
scattered_field_data=np.zeros(np.shape(scattered_field_data))
return scattered_field_data.reshape(3,1)[:,0]
def scattering_solution(dx,N,T,m):
grid=bempp.api.shapes.sphere(h=dx)
rhs=create_rhs(grid,dx,N,T,m)
def ellipticSystem(s,b):
return harmonic_calderon(s,b,grid)
ScatOperator=Conv_Operator(ellipticSystem)
#num_sol=ScatOperator.apply_convol(rhs,T)
if (m==2):
num_solStages=ScatOperator.apply_RKconvol(rhs,T,cutoff=10**(-4),method="RadauIIA-2")
if (m==3):
num_solStages=ScatOperator.apply_RKconvol(rhs,T,cutoff=10**(-4),method="RadauIIA-3")
num_sol=np.zeros((len(num_solStages[:,0]),N+1))
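	# Keep only the last stage of each step: for Radau IIA the final node is
	# c_m = 1, so that stage coincides with the step endpoint.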
num_sol[:,1:N+1]=np.real(num_solStages[:,m-1:N*m:m])
return num_sol
import time
T=6
N_ref=2**10
tt_ref=np.linspace(0,T,N_ref+1)
dx_ref=np.sqrt(2)**(-10)
m=3
import matplotlib.pyplot as plt
start=time.time()
#sol_ref=scattering_solution(dx_ref,N_ref,T)
#sol_ref2=scattering_solution(dx_ref,N_ref,T)
#tt=np.linspace(0,T,N_ref+1)
#plt.plot(tt,np.abs(sol_ref[0,:]))
#plt.plot(tt,np.abs(sol_ref[0,:]-sol_ref2[0,:]))
##plt.plot(tt,resc_ref[0,:],linestyle='dashed')
##plt.plot(tt,num_sol[0,:])
#plt.show()
sol_ref=scattering_solution(dx_ref,N_ref,T,m)
np.save("data/sol_ref_absorbing_delta10_N2h10_dxsqrt2m10RK5.npy",sol_ref)
#Current Reference solutions:
#np.save("data/sol_ref_absorbing_delta0p1_N212_dxsqrt2m9RK5.npy",sol_ref)
#np.save("data/sol_ref_absorbing_delta001_N212_dxsqrt2m9RK3.npy",sol_ref)
#sol_ref=np.load("data/sol_ref_absorbing_delta1_N212_dxsqrt2m9RK3.npy")
#sol_ref=np.load("data/sol_ref_absorbing_delta001_N212_dxsqrt2m9RK3.npy")
#sol_ref=np.load("data/sol_ref_absorbing_N212_dxsqrt2m7RK5.npy")
#import scipy.io
#scipy.io.loadmat('data/Err_data_delta1.mat')
#tt=np.linspace(0,T,N_ref+1)
#plt.plot(tt,sol_ref[0,:])
#plt.show()
#plt.plot(sol_ref[0,:]**2+sol_ref[1,:]**2+sol_ref[2,:]**2)
#plt.show()
Am_space=9
Am_time=7
#Am_space=1
#Am_time=8
tau_s=np.zeros(Am_time)
h_s=np.zeros(Am_space)
errors=np.zeros((Am_space,Am_time))
m=2
for ixSpace in range(Am_space):
for ixTime in range(Am_time):
N=8*2**(ixTime)
tau_s[ixTime]=T*1.0/N
tt=np.linspace(0,T,N+1)
dx=np.sqrt(2)**(-ixSpace)
h_s[ixSpace]=dx
		speed=N_ref//N
resc_ref=np.zeros((3,N+1))
# resc_ref=sol_ref
for j in range(N+1):
resc_ref[:,j] = sol_ref[:,j*speed]
#num_sol = calc_ref_sol(N,dx,F_transfer)
num_sol = scattering_solution(dx,N,T,m)
# plt.plot(tt,num_sol[0,:]**2+num_sol[1,:]**2+num_sol[2,:]**2)
# plt.plot(tt_ref,sol_ref[0,:]**2+sol_ref[1,:]**2+sol_ref[2,:]**2,linestyle='dashed')
# plt.show()
errors[ixSpace,ixTime]=np.max(np.abs(resc_ref-num_sol))
print(errors)
import scipy.io
# scipy.io.savemat('data/Err_data_delta0p1.mat', dict( ERR=errors,h_s=h_s,tau_s=tau_s))
#scipy.io.savemat('data/Err_data_delta0p1_long.mat', dict( ERR=errors,h_s=h_s,tau_s=tau_s))
end=time.time()
print("Script Runtime: "+str((end-start)/60) +" Min")
| 11,396 | 33.853211 | 133 | py |
CQMaxwell | CQMaxwell-main/Old Scripts/RKtemp.py | import bempp.api
import numpy as np
import math
from RKconv_op import *
print("Bempp version used : " + bempp.api.__version__)
def create_timepoints(c,N,T):
m=len(c)
time_points=np.zeros((1,m*N))
for j in range(m):
time_points[0,j:m*N:m]=c[j]*1.0/N*np.ones((1,N))+np.linspace(0,1-1.0/N,N)
return T*time_points
def create_rhs(grid,dx,N,T,m):
# grid=bempp.api.shapes.cube(h=1)
OrderQF = 8
#tol= np.finfo(float).eps
bempp.api.global_parameters.quadrature.near.max_rel_dist = 2
bempp.api.global_parameters.quadrature.near.single_order =OrderQF-1
bempp.api.global_parameters.quadrature.near.double_order = OrderQF-1
bempp.api.global_parameters.quadrature.medium.max_rel_dist =4
bempp.api.global_parameters.quadrature.medium.single_order =OrderQF-2
bempp.api.global_parameters.quadrature.medium.double_order =OrderQF-2
bempp.api.global_parameters.quadrature.far.single_order =OrderQF-3
bempp.api.global_parameters.quadrature.far.double_order =OrderQF-3
bempp.api.global_parameters.quadrature.double_singular = OrderQF
bempp.api.global_parameters.hmat.eps=10**-4
bempp.api.global_parameters.hmat.admissibility='strong'
if (m==2):
c_RK=np.array([1.0/3,1])
if (m==3):
c_RK=np.array([2.0/5-math.sqrt(6)/10,2.0/5+math.sqrt(6)/10,1])
from bempp.api.operators.boundary import maxwell
from bempp.api.operators.boundary import sparse
# multitrace = maxwell.multitrace_operator(grid, 1)
NC_space = bempp.api.function_space(grid,"NC",0)
RT_space = bempp.api.function_space(grid,"RT",0)
#curl_space = bempp.api.function_space(grid, "RBC", 0)
BC_space=bempp.api.function_space(grid, "BC",0)
SNC_space=bempp.api.function_space(grid, "SNC",0)
BRWG_space=bempp.api.function_space(grid, "B-RWG",0)
# div_space=bempp.api.function_space(grid, "B-RWG",0)
RBC_space=bempp.api.function_space(grid,"RBC",0)
# curl_space=bempp.api.function_space(grid,"RBC",0)
#from bempp.api.operators.boundary.sparse import identity as ident
# id1 = ident(div_space,div_space,curl_space).weak_form()
# print("CONDITION NUMBER : ", np.linalg.cond(bempp.api.as_matrix(id1).todense()))
dof=RT_space.global_dof_count
dof1=NC_space.global_dof_count
print(" DOF: ", dof)
rhs=np.zeros((dof+dof,N*m))
curls=np.zeros((dof,N*m))
time_points=create_timepoints(c_RK,N,T)
for j in range(m*N):
t=time_points[0,j]
def incident_field(x):
return np.array([np.exp(-50*(x[2]-t+2)**2), 0. * x[2], 0. * x[2]])
#return np.array([np.exp(-200*(x[2]-t+2)**2), 0. * x[2], 0. * x[2]])
def tangential_trace(x, n, domain_index, result):
result[:] = np.cross(n,np.cross(incident_field(x), n))
def curl_trace(x,n,domain_index,result):
curlU=np.array([ 0. * x[2],-100*(x[2]-t+2)*np.exp(-50*(x[2]-t+2)**2), 0. * x[2]])
result[:] = np.cross(curlU , n)
curl_fun = bempp.api.GridFunction(RT_space, fun=curl_trace,dual_space=RT_space)
trace_fun= bempp.api.GridFunction(RT_space, fun=tangential_trace,dual_space=RT_space)
rhs[0:dof,j]=trace_fun.coefficients
curlCoeffs=curl_fun.coefficients
if np.linalg.norm(curlCoeffs)>10**-9:
curls[0:dof,j]=curlCoeffs
#print("RHS NORM :", np.linalg.norm(trace_fun.coefficients))
def sinv(s,b):
return s**(-1)*b
IntegralOperator=Conv_Operator(sinv)
def HarmonicImpedance(s,b):
return 10*s**(0.5)*b
TimeImpedance=Conv_Operator(HarmonicImpedance)
if (m==2):
curls=IntegralOperator.apply_RKconvol(curls,T,method="RadauIIA-2",show_progress=False)
ZptNeuTrace=TimeImpedance.apply_RKconvol(curls,T,method="RadauIIA-2",show_progress=False)
if (m==3):
curls=IntegralOperator.apply_RKconvol(curls,T,method="RadauIIA-3",show_progress=False)
ZptNeuTrace=TimeImpedance.apply_RKconvol(curls,T,method="RadauIIA-3",show_progress=False)
rhs[0:dof,:]=np.real(ZptNeuTrace)-rhs[0:dof,:]
return rhs
def harmonic_calderon(s,b,grid):
points=np.array([[0],[0],[2]])
#normb=np.linalg.norm(b[0])+np.linalg.norm(b[1])+np.linalg.norm(b[2])
normb=np.max(np.abs(b))
bound=np.abs(s)**4*np.exp(-s.real)*normb
print("s: ",s, " maxb: ", normb, " bound : ", bound)
if bound <10**(-9):
print("JUMPED")
return np.zeros(3)
OrderQF = 8
#tol= np.finfo(float).eps
bempp.api.global_parameters.quadrature.near.max_rel_dist = 2
bempp.api.global_parameters.quadrature.near.single_order =OrderQF-1
bempp.api.global_parameters.quadrature.near.double_order = OrderQF-1
bempp.api.global_parameters.quadrature.medium.max_rel_dist =4
bempp.api.global_parameters.quadrature.medium.single_order =OrderQF-2
bempp.api.global_parameters.quadrature.medium.double_order =OrderQF-2
bempp.api.global_parameters.quadrature.far.single_order =OrderQF-3
bempp.api.global_parameters.quadrature.far.double_order =OrderQF-3
bempp.api.global_parameters.quadrature.double_singular = OrderQF
bempp.api.global_parameters.hmat.eps=10**-4
bempp.api.global_parameters.hmat.admissibility='strong'
NC_space=bempp.api.function_space(grid, "NC",0)
RT_space=bempp.api.function_space(grid, "RT",0)
elec = -bempp.api.operators.boundary.maxwell.electric_field(RT_space, RT_space, NC_space,1j*s)
magn = -bempp.api.operators.boundary.maxwell.magnetic_field(RT_space, RT_space, NC_space, 1j*s)
identity2=bempp.api.operators.boundary.sparse.identity(RT_space, RT_space, RT_space)
identity= -bempp.api.operators.boundary.sparse.identity(RT_space, RT_space, NC_space)
dof=NC_space.global_dof_count
trace_fun= bempp.api.GridFunction(RT_space, coefficients=b[0:dof],dual_space=RT_space)
normb=trace_fun.l2_norm()
bound=np.abs(s)**3*np.exp(-s.real)*normb
if bound <10**(-8):
print("JUMPED")
return np.zeros(3)
zero_fun= bempp.api.GridFunction(RT_space,coefficients = b[dof:],dual_space=RT_space)
#rhs=[trace_fun,zero_fun]
id_discrete=identity2.weak_form()
b[0:dof]=id_discrete*b[0:dof]
blocks=np.array([[None,None], [None,None]])
#blocks[0,0] = -elec.weak_form()+10*s**0.5*identity2.weak_form()
blocks[0,0] = -elec.weak_form()+10*s**0.5*identity2.weak_form()
blocks[0,1] = magn.weak_form()-1.0/2*identity.weak_form()
blocks[1,0] = -magn.weak_form()-1.0/2*identity.weak_form()
blocks[1,1] = -elec.weak_form()
blocks=bempp.api.BlockedDiscreteOperator(blocks)
# A_mat=bempp.api.as_matrix(blocks)
# print("A_mat : ",A_mat)
# e,D=np.linalg.eig(A_mat)
# print("Eigs : ", e)
# print("Cond : ", np.linalg.cond(A_mat))
##
## trace_fun= bempp.api.GridFunction(multitrace.range_spaces[0], coefficients=b[0:dof],dual_space=multitrace.dual_to_range_spaces[0])
##
## zero_fun= bempp.api.GridFunction(multitrace.range_spaces[1],coefficients = b[dof:],dual_space=multitrace.dual_to_range_spaces[1])
##
## rhs=[trace_fun,zero_fun]
##
## #print("Still living")
##
#from bempp.api.linalg import gmres
from scipy.sparse.linalg import gmres
print("Start GMRES : ")
# def print_res(rk):
# print("Norm of residual: "+ str(np.linalg.norm(rk)))
#print(np.linalg.norm(lambda_data))
#lambda_data,info = gmres(blocks, b,tol=10**-4,restart=50,maxiter=100,callback=print_res)
lambda_data,info = gmres(blocks, b,tol=10**-5,maxiter=300)
print("INFO :", info)
#lambda_data,info = gmres(blocks, b,tol=10**-4,callback=print_res)
#print("I survived!")
#from bempp.api.linalg import lu
#lambda_data = lu(elec, trace_fun)
#lambda_data.plot()
#print("Norm lambda_data : ",np.linalg.norm(lambda_data))
#if (np.linalg.norm(lambda_data)<10**-10):
phigrid=bempp.api.GridFunction(RT_space,coefficients=lambda_data[0:dof],dual_space=RT_space)
psigrid=bempp.api.GridFunction(RT_space,coefficients=lambda_data[dof:2*dof],dual_space=RT_space)
slp_pot = bempp.api.operators.potential.maxwell.electric_field(RT_space, points, s*1j)
dlp_pot = bempp.api.operators.potential.maxwell.magnetic_field(RT_space, points, s*1j)
print("Evaluate field : ")
scattered_field_data = -slp_pot * phigrid+dlp_pot*psigrid
# scattered_field_H = -slp_pot * psigrid-dlp_pot*phigrid
# H = scattered_field_H.reshape(3,1)[:,0]
# print(" E : ", E, " H : ", H)
# print("Angle: ", np.dot(E,H), " Scalar product with conjugation : ", np.dot(np.conj(E),H))
# print("NORM COMBINED OPERATOR :" , np.linalg.norm(scattered_field_data)/np.linalg.norm(b))
# print(scattered_field_data)
# print("NORM ScatteredField :", np.linalg.norm(scattered_field_data))
# print("s : ", s)
# print("NORM B :" ,np.linalg.norm(b))
if np.isnan(scattered_field_data).any():
print("NAN Warning, s = ", s)
scattered_field_data=np.zeros(np.shape(scattered_field_data))
return scattered_field_data.reshape(3,1)[:,0]
def scattering_solution(dx,N,T,m):
grid=bempp.api.shapes.sphere(h=dx)
rhs=create_rhs(grid,dx,N,T,m)
def ellipticSystem(s,b):
return harmonic_calderon(s,b,grid)
ScatOperator=Conv_Operator(ellipticSystem)
#num_sol=ScatOperator.apply_convol(rhs,T)
if (m==2):
num_solStages=ScatOperator.apply_RKconvol(rhs,T,cutoff=10**(-7),method="RadauIIA-2")
if (m==3):
num_solStages=ScatOperator.apply_RKconvol(rhs,T,cutoff=10**(-7),method="RadauIIA-3")
num_sol=np.zeros((len(num_solStages[:,0]),N+1))
num_sol[:,1:N+1]=np.real(num_solStages[:,m-1:N*m:m])
return num_sol
import time
T=6
#N_ref=2**4
N_ref=2**8
tt_ref=np.linspace(0,T,N_ref+1)
#dx_ref=np.sqrt(2)**(-4)
dx_ref=np.sqrt(2)**(-0)
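# Coarse debug parameters; the production runs in the sibling scripts use
# N_ref = 2**11 and dx_ref = sqrt(2)**(-9).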
m=3
import matplotlib.pyplot as plt
start=time.time()
#sol_ref=scattering_solution(dx_ref,N_ref,T)
#sol_ref2=scattering_solution(dx_ref,N_ref,T)
#tt=np.linspace(0,T,N_ref+1)
#plt.plot(tt,np.abs(sol_ref[0,:]))
#plt.plot(tt,np.abs(sol_ref[0,:]-sol_ref2[0,:]))
##plt.plot(tt,resc_ref[0,:],linestyle='dashed')
##plt.plot(tt,num_sol[0,:])
#plt.show()
sol_ref=scattering_solution(dx_ref,N_ref,T,m)
#np.save("data/sol_ref_absorbing_delta10_N2h11_dxsqrt2m9RK5.npy",sol_ref)
#sol_ref=np.load("data/sol_ref_absorbing_delta0p1_N2h11_dxsqrt2m10RK5.npy")
#Current Reference solutions:
#np.save("data/sol_ref_absorbing_delta0p1_N212_dxsqrt2m9RK5.npy",sol_ref)
#np.save("data/sol_ref_absorbing_delta001_N212_dxsqrt2m9RK3.npy",sol_ref)
#sol_ref=np.load("data/sol_ref_absorbing_delta1_N212_dxsqrt2m9RK3.npy")
#sol_ref=np.load("data/sol_ref_absorbing_delta001_N212_dxsqrt2m9RK3.npy")
#sol_ref=np.load("data/sol_ref_absorbing_N212_dxsqrt2m7RK5.npy")
#import scipy.io
#scipy.io.loadmat('data/Err_data_delta1.mat')
#tt=np.linspace(0,T,N_ref+1)
#plt.plot(tt,sol_ref[0,:])
#plt.show()
#plt.plot(sol_ref[0,:]**2+sol_ref[1,:]**2+sol_ref[2,:]**2)
#plt.show()
Am_space=8
Am_time=7
#Am_space=1
#Am_time=8
tau_s=np.zeros(Am_time)
h_s=np.zeros(Am_space)
errors=np.zeros((Am_space,Am_time))
m=2
for ixSpace in range(Am_space):
for ixTime in range(Am_time):
N=8*2**(ixTime)
tau_s[ixTime]=T*1.0/N
tt=np.linspace(0,T,N+1)
dx=np.sqrt(2)**(-ixSpace)
h_s[ixSpace]=dx
		speed=N_ref//N
resc_ref=np.zeros((3,N+1))
# resc_ref=sol_ref
for j in range(N+1):
resc_ref[:,j] = sol_ref[:,j*speed]
#num_sol = calc_ref_sol(N,dx,F_transfer)
num_sol = scattering_solution(dx,N,T,m)
# plt.plot(tt,num_sol[0,:]**2+num_sol[1,:]**2+num_sol[2,:]**2)
# plt.plot(tt_ref,sol_ref[0,:]**2+sol_ref[1,:]**2+sol_ref[2,:]**2,linestyle='dashed')
# plt.show()
errors[ixSpace,ixTime]=np.max(np.abs(resc_ref-num_sol))
print(errors)
import scipy.io
# scipy.io.savemat('data/Err_data_delta10.mat', dict( ERR=errors,h_s=h_s,tau_s=tau_s))
#scipy.io.savemat('data/Err_data_delta0p1_long.mat', dict( ERR=errors,h_s=h_s,tau_s=tau_s))
end=time.time()
print("Script Runtime: "+str((end-start)/60) +" Min")
| 11,384 | 34.247678 | 133 | py |
CQMaxwell | CQMaxwell-main/Old Scripts/MaxwellFrames.py | import bempp.api
import numpy as np
import math
from RKconv_op import *
def create_timepoints(c,N,T):
m=len(c)
time_points=np.zeros((1,m*N))
for j in range(m):
time_points[0,j:m*N:m]=c[j]*1.0/N*np.ones((1,N))+np.linspace(0,1-1.0/N,N)
return T*time_points
def create_rhs(grid,N,T,m):
#grid=bempp.api.shapes.sphere(h=dx)
if (m==2):
c_RK=np.array([1.0/3,1])
if (m==3):
c_RK=np.array([2.0/5-math.sqrt(6)/10,2.0/5+math.sqrt(6)/10,1])
from bempp.api.operators.boundary import maxwell
RT_space = bempp.api.function_space(grid,"RT",0)
# curl_space=bempp.api.function_space(grid,"RBC",0)
#from bempp.api.operators.boundary.sparse import identity as ident
# id1 = ident(div_space,div_space,curl_space).weak_form()
# print("CONDITION NUMBER : ", np.linalg.cond(bempp.api.as_matrix(id1).todense()))
dof=RT_space.global_dof_count
print(" DOF: ", dof)
rhs=np.zeros((dof+dof,N*m))
curls=np.zeros((dof,N*m))
time_points=create_timepoints(c_RK,N,T)
for j in range(m*N):
t=time_points[0,j]
def incident_field(x):
return np.array([np.exp(-50*(x[2]-t+1)**2), 0. * x[2], 0. * x[2]])
#return np.array([np.exp(-200*(x[2]-t+2)**2), 0. * x[2], 0. * x[2]])
def tangential_trace(x, n, domain_index, result):
result[:] = np.cross(n,np.cross(incident_field(x), n))
def curl_trace(x,n,domain_index,result):
curlU=np.array([ 0. * x[2],-100*(x[2]-t+1)*np.exp(-50*(x[2]-t+1)**2), 0. * x[2]])
result[:] = np.cross(curlU , n)
curl_fun = bempp.api.GridFunction(RT_space, fun=curl_trace,dual_space=RT_space)
trace_fun= bempp.api.GridFunction(RT_space, fun=tangential_trace,dual_space=RT_space)
rhs[0:dof,j]=trace_fun.coefficients
curlCoeffs=curl_fun.coefficients
if np.linalg.norm(curlCoeffs)>10**-9:
curls[0:dof,j]=curlCoeffs
#print("RHS NORM :", np.linalg.norm(trace_fun.coefficients))
def sinv(s,b):
return s**(-1)*b
IntegralOperator=Conv_Operator(sinv)
def HarmonicImpedance(s,b):
return 0.1*s**(0.5)*b
TimeImpedance=Conv_Operator(HarmonicImpedance)
if (m==2):
curls=IntegralOperator.apply_RKconvol(curls,T,method="RadauIIA-2",show_progress=False)
ZptNeuTrace=TimeImpedance.apply_RKconvol(curls,T,method="RadauIIA-2",show_progress=False)
if (m==3):
curls=IntegralOperator.apply_RKconvol(curls,T,method="RadauIIA-3",show_progress=False)
ZptNeuTrace=TimeImpedance.apply_RKconvol(curls,T,method="RadauIIA-3",show_progress=False)
rhs[0:dof,:]=np.real(ZptNeuTrace)-rhs[0:dof,:]
return rhs
def harmonic_calderon(s,b,grid,points):
OrderQF = 8
#tol= np.finfo(float).eps
bempp.api.global_parameters.quadrature.near.max_rel_dist = 2
bempp.api.global_parameters.quadrature.near.single_order =OrderQF-1
bempp.api.global_parameters.quadrature.near.double_order = OrderQF-1
bempp.api.global_parameters.quadrature.medium.max_rel_dist =4
bempp.api.global_parameters.quadrature.medium.single_order =OrderQF-2
bempp.api.global_parameters.quadrature.medium.double_order =OrderQF-2
bempp.api.global_parameters.quadrature.far.single_order =OrderQF-3
bempp.api.global_parameters.quadrature.far.double_order =OrderQF-3
bempp.api.global_parameters.quadrature.double_singular = OrderQF
bempp.api.global_parameters.hmat.eps=10**-4
bempp.api.global_parameters.hmat.admissibility='strong'
NC_space=bempp.api.function_space(grid, "NC",0)
RT_space=bempp.api.function_space(grid, "RT",0)
elec = -bempp.api.operators.boundary.maxwell.electric_field(RT_space, RT_space, NC_space,1j*s)
magn = -bempp.api.operators.boundary.maxwell.magnetic_field(RT_space, RT_space, NC_space, 1j*s)
identity2=bempp.api.operators.boundary.sparse.identity(RT_space, RT_space, RT_space)
identity= -bempp.api.operators.boundary.sparse.identity(RT_space, RT_space, NC_space)
dof=NC_space.global_dof_count
trace_fun= bempp.api.GridFunction(RT_space, coefficients=b[0:dof],dual_space=RT_space)
zero_fun= bempp.api.GridFunction(RT_space,coefficients = b[dof:],dual_space=RT_space)
#rhs=[trace_fun,zero_fun]
id_discrete=identity2.weak_form()
b[0:dof]=id_discrete*b[0:dof]
blocks=np.array([[None,None], [None,None]])
blocks[0,0] = -elec.weak_form()+0.1*s**0.5*identity2.weak_form()
blocks[0,1] = magn.weak_form()-1.0/2*identity.weak_form()
blocks[1,0] = -magn.weak_form()-1.0/2*identity.weak_form()
blocks[1,1] = -elec.weak_form()
blocks=bempp.api.BlockedDiscreteOperator(blocks)
#from bempp.api.linalg import gmres
from scipy.sparse.linalg import gmres
lambda_data,info = gmres(blocks, b,tol=10**-5)
#print(np.linalg.norm(lambda_data))
#print("I survived!")
#from bempp.api.linalg import lu
#lambda_data = lu(elec, trace_fun)
#lambda_data.plot()
#print("Norm lambda_data : ",np.linalg.norm(lambda_data))
#if (np.linalg.norm(lambda_data)<10**-10):
phigrid=bempp.api.GridFunction(RT_space,coefficients=lambda_data[0:dof],dual_space=RT_space)
psigrid=bempp.api.GridFunction(RT_space,coefficients=lambda_data[dof:2*dof],dual_space=RT_space)
# x_a=-0.75
# x_b=0.75
# y_a=-0.25
# y_b=1.25
##
# x_a=-2
# x_b=2
# y_a=-2
# y_b=2
# n_grid_points=150
# plot_grid = np.mgrid[y_a:y_b:1j*n_grid_points, x_a:x_b:1j*n_grid_points]
## plot_grid = np.mgrid[-0.5:1:1j*n_grid_points, -1.5:1.5:1j*n_grid_points]
# #print(plot_grid)
## points = np.vstack( ( plot_grid[0].ravel() , plot_grid[1].ravel() , 0.25*np.ones(plot_grid[0].size) ) )
# points = np.vstack( ( plot_grid[0].ravel() , 0*np.ones(plot_grid[0].size) , plot_grid[1].ravel()) )
#point=np.array([[0],[0],[2]])
slp_pot = bempp.api.operators.potential.maxwell.electric_field(RT_space, points, s*1j)
dlp_pot = bempp.api.operators.potential.maxwell.magnetic_field(RT_space, points, s*1j)
scattered_field_data = -slp_pot * phigrid+dlp_pot*psigrid
# print("NORM COMBINED OPERATOR :" , np.linalg.norm(scattered_field_data)/np.linalg.norm(b))
# print(scattered_field_data)
# print("NORM ScatteredField :", np.linalg.norm(scattered_field_data))
# print("s : ", s)
# print("NORM B :" ,np.linalg.norm(b))
if np.isnan(scattered_field_data).any():
print("NAN Warning",s)
print("NORM B :" ,np.linalg.norm(b))
return np.zeros(n_grid_points**2*3)
#print(scattered_field_data.reshape(3,1)[:,0])
return scattered_field_data.reshape(n_grid_points**2*3,1)[:,0]
def scattering_solution(dx,N,T,m,points):
import scipy.io
import numpy as np
mat_contents=scipy.io.loadmat('grids/TorusDOF896.mat')
Nodes=np.array(mat_contents['Nodes']).T
rawElements=mat_contents['Elements']
for j in range(len(rawElements)):
betw=rawElements[j][0]
rawElements[j][0]=rawElements[j][1]
rawElements[j][1]=betw
Elements=np.array(rawElements).T
Elements=Elements-1
grid=bempp.api.grid_from_element_data(Nodes,Elements)
# def tangential_trace(x, n, domain_index, result):
# result[:] = n[1]
# P1_space = bempp.api.function_space(grid,"P",1)
# normal_fun = bempp.api.GridFunction(P1_space, fun=tangential_trace,dual_space=P1_space)
#normal_fun.plot()
#grid.plot()
#grid=bempp.api.shapes.sphere(h=dx)
rhs=create_rhs(grid,N,T,m)
def ellipticSystem(s,b):
return harmonic_calderon(s,b,grid,points)
ScatOperator=Conv_Operator(ellipticSystem)
#num_sol=ScatOperator.apply_convol(rhs,T)
if (m==2):
num_solStages=ScatOperator.apply_RKconvol(rhs,T,cutoff=10**(-8),method="RadauIIA-2")
if (m==3):
num_solStages=ScatOperator.apply_RKconvol(rhs,T,cutoff=10**(-8),method="RadauIIA-3")
num_sol=np.zeros((len(num_solStages[:,0]),N+1))
num_sol[:,1:N+1]=np.real(num_solStages[:,m-1:N*m:m])
return num_sol
import time
N=200
T=4
#Generate points
x_a=-1.5
x_b=1.5
y_a=-1.5
y_b=1.5
n_grid_points=300
nx=n_grid_points
nz=n_grid_points
plot_grid = np.mgrid[y_a:y_b:1j*n_grid_points, x_a:x_b:1j*n_grid_points]
#plot_grid = np.mgrid[-0.5:1:1j*n_grid_points, -1.5:1.5:1j*n_grid_points]
#print(plot_grid)
points = np.vstack( ( plot_grid[0].ravel() , 0*np.ones(plot_grid[0].size) , plot_grid[1].ravel()) )
evals=scattering_solution(1,N,T,3,points)
u_ges=np.zeros((n_grid_points**2,N+1))
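# u_ges[:, j] stores the squared field density on the nx*nz evaluation grid
# at time t_j = j*T/N; it is exported to MATLAB after the loop.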
for j in range(N+1):
#matplotlib inline
import matplotlib
from matplotlib import pylab as plt
# Adjust the figure size in IPython
matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)
# A=points[0]
# B=points[2]
# max_grid=np.zeros((nx,nz))
## for k in range(nz):
## A[k]=A[k]-0.5
## B[k]=B[k]-0.5
## for i in range(nx):
## max_grid[i,k]=max(np.abs(A[i]-0.5),np.abs(B[k]-0.5))
t=j*T*1.0/N
def incident_field(x):
return np.array([np.exp(-50*(x[2]-t+1)**2), 0. * x[2], 0. * x[2]])
incident_field_data = incident_field(points)
#scat_eval=np.zeros(nx*nz*3)
#incident_field_data[radius<1]=np.nan
scat_eval=evals[:,j].reshape(3,nx*nz)
# print(scat_eval)
field_data = scat_eval + incident_field_data
# field_data = scat_eval
# field_data = incident_field_data
# print("Points: ")
# print(points)
# print("Data: ")
#print(field_data)
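	# Squared Euclidean norm of the (real) total field at every point:
	# the sum over the three components of E_i^2.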
squared_field_density = np.sum(field_data * field_data,axis = 0)
u_ges[:,j]=squared_field_density.T
#squared_field_density=field_data[2,:]
#squared_field_density[radius<1]=np.nan
#print("MAX FIELD DATA: " , max(squared_field_density))
print(max(np.abs(squared_field_density)))
#plt.imshow(squared_field_density.reshape((nx, nz)).T,
# cmap='coolwarm', origin='lower',
# extent=[x_a, x_b, y_a, y_b])
#if j==10:
# plt.colorbar()
#plt.clim((-1,1))
#plt.title("Squared Electric Field Density")
#plt.savefig("data/wave_images/Screen_n{}.png".format(j))
import scipy.io
scipy.io.savemat('data/delta01_dof896.mat',dict(u_ges=u_ges,N=N,T=T,plot_grid=plot_grid,points=points))
| 9,655 | 33 | 113 | py |
Semi-Online-KD | Semi-Online-KD-master/main.py | import argparse
import yaml
import os
import torch
from trainer import build_trainer
from utils.utils import save_code, save_opts
def main():
parser = argparse.ArgumentParser(description='KnowledgeDistillation')
parser.add_argument('--configs', '-c', dest='params', default='./configs/sokd.yaml')
parser.add_argument('--name', '-n', dest='name', default='debug')
parser.add_argument('--seed', '-s', type=int, default=8888)
parser.add_argument('--gpus', '-g', type=str, default='0')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
with open(args.params) as f:
params = yaml.load(f, Loader=yaml.FullLoader)
params['name'] = args.name
params['seed'] = args.seed
params['device'] = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
trainer = build_trainer(**params)
save_opts(params, trainer.save_folder)
save_code(trainer.repo_path, f"{trainer.save_folder}/code", ['results', 'datasets'])
trainer.run()
trainer.logger.info(f"{trainer.experimental_name} done!")
if __name__ == '__main__':
main()
| 1,116 | 31.852941 | 88 | py |
Semi-Online-KD | Semi-Online-KD-master/trainer/base_trainer.py | from datetime import datetime
from tensorboardX import SummaryWriter
import os
import logging
from utils.utils import create_logger, output_process, fix_random
class BaseTrainer(object):
def __init__(self, experimental_name='debug', seed=None):
# BASE
self.current_time = datetime.now().strftime('%b.%d_%H.%M.%S')
self.writer = None
self.logger = None
self.experimental_name = experimental_name
self.seed = seed
# SAVE PATH
self.repo_path = os.getcwd()
self.save_folder = f'{self.repo_path}/results/{experimental_name}'
output_process(self.save_folder) # create folder or not
self._init_log() # get log and writer
if seed is not None:
fix_random(seed)
def _init_log(self):
self.writer = SummaryWriter(log_dir=self.save_folder)
self.logger = create_logger()
fh = logging.FileHandler(filename=f'{self.save_folder}/log.txt')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
self.logger.addHandler(fh)
| 1,135 | 31.457143 | 93 | py |
Semi-Online-KD | Semi-Online-KD-master/trainer/vanilla.py | import torch.nn as nn
import torch
from tqdm import tqdm
from trainer.base_trainer import BaseTrainer
from models import model_dict
from utils.utils import count_parameters_in_MB, AverageMeter, accuracy, save_checkpoint
from dataset import get_dataloader
class Vanilla(BaseTrainer):
def __init__(self, params, experimental_name=''):
# Data
self.data_name = params.get('data_name')
self.data_path = params.get('data_path')
self.num_classes = params.get('num_classes', 100)
self.train_loader = None
self.test_loader = None
# Model
self.model_name = params.get('model_name')
self.model_depth = params.get('model_depth', '')
self.model_widen = params.get('model_widen', '')
self.model_checkpoint = params.get('model_checkpoint')
self.model = None
self.testing = params.get('evaluation', False)
# Base training settings
self.start_epoch = params.get('start_epoch', 1)
self.epochs = params.get('epochs', 200)
self.batch_size = params.get('batch_size', 128)
self.lr = params.get('lr', 0.1)
self.device = params.get('device', 'cuda')
self.milestones = params.get('milestones', [200])
self.optimizer = None
self.scheduler = None
self.criterion_ce = nn.CrossEntropyLoss()
# Log
self.best_top1 = 0
self.best_top5 = 0
self.best_epoch = 0
seed = params.get('seed', None)
experimental_name = f"{self.__class__.__name__}_{self.model_name}{self.model_depth}-{self.model_widen}_{self.data_name}_" \
f"{experimental_name}/{params.get('name', 'debug')}"
super().__init__(experimental_name, seed)
def run(self):
self.set_data()
self.set_model()
self.set_optimizer_scheduler()
self.train_model()
def train_model(self):
if self.model_checkpoint:
state_dict = torch.load(self.model_checkpoint)
self.model.load_state_dict(state_dict['model'])
self.optimizer.load_state_dict(state_dict['optimizer'])
self.scheduler.load_state_dict(state_dict['scheduler'])
self.best_top1 = state_dict['best_top1']
self.best_top5 = state_dict['best_top5']
self.best_epoch = state_dict['best_epoch']
self.start_epoch = state_dict['start_epoch']
self.logger.info("Load model's checkpoint done!")
if self.testing:
self.logger.info("Start testing model...")
top1, top5 = self.evaluation_vanilla(self.model)
self.logger.info(f"top1:{top1.avg:.2f}, top5:{top5.avg:.2f}")
else:
self.logger.info("Start training model...")
for epoch in tqdm(range(self.start_epoch, self.epochs + 1)):
self.logger.info(f'Epoch[{epoch}/{self.epochs}]')
self.train()
top1, top5 = self.evaluation(self.model)
self.writer.add_scalar('test/top1', top1.avg, epoch)
is_best = False
if top1.avg > self.best_top1:
self.best_top1 = top1.avg
self.best_top5 = top5.avg
self.best_epoch = epoch
is_best = True
state_dict = {'model': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict(),
'best_top1': self.best_top1,
'best_top5': self.best_top5,
'best_epoch': self.best_epoch,
'start_epoch': epoch
}
save_checkpoint(state_dict, is_best, f"{self.save_folder}/model")
self.logger.info(
f"Test=> lr:{self.optimizer.param_groups[0]['lr']}, "
f"top1:{top1.avg:.2f}, top5:{top5.avg:.2f} "
f"@Best:({self.best_top1}, {self.best_top5}, {self.best_epoch})")
self.scheduler.step()
def train(self):
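        """Run one epoch of plain cross-entropy training."""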
self.model.train()
total_loss = AverageMeter()
total_top1 = AverageMeter()
total_top5 = AverageMeter()
for batch_id, (data, targets) in enumerate(self.train_loader):
data = data.to(self.device)
targets = targets.to(self.device)
output = self.model(data)
loss = self.criterion_ce(output, targets)
top1, top5 = accuracy(output, targets, topk=(1, 5))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
total_loss.update(loss.item(), data.size(0))
total_top1.update(top1.item(), data.size(0))
total_top5.update(top5.item(), data.size(0))
info_str = f"Train=> total_loss: {total_loss.avg}, " \
f"prec@1: {total_top1.avg}, prec@5: {total_top5.avg}"
self.logger.info(info_str)
@torch.no_grad()
def evaluation_vanilla(self, model):
model.eval()
total_top1 = AverageMeter()
total_top5 = AverageMeter()
for batch_id, (data, targets) in enumerate(self.test_loader):
data = data.to(self.device)
targets = targets.to(self.device)
output_S = model(data)
top1, top5 = accuracy(output_S, targets, topk=(1, 5))
total_top1.update(top1.item(), data.size(0))
total_top5.update(top5.item(), data.size(0))
return total_top1, total_top5
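
    # Identical to evaluation_vanilla here; kept as a separate method so subclasses
    # (e.g. SemiOnlineKnowledgeDistillation) can override it with extra logging.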
@torch.no_grad()
def evaluation(self, model):
model.eval()
total_top1 = AverageMeter()
total_top5 = AverageMeter()
for batch_id, (data, targets) in enumerate(self.test_loader):
data = data.to(self.device)
targets = targets.to(self.device)
output_S = model(data)
top1, top5 = accuracy(output_S, targets, topk=(1, 5))
total_top1.update(top1.item(), data.size(0))
total_top5.update(top5.item(), data.size(0))
return total_top1, total_top5
def set_data(self):
self.train_loader, self.test_loader = get_dataloader(self.data_name, self.data_path, self.batch_size)
def set_model(self):
if self.data_name.startswith('CIFAR'):
if self.model_name == 'wideresnet':
self.model = model_dict[f"wrn_{self.model_depth}_{self.model_widen}"](num_classes=self.num_classes)
else:
assert False, f'Not considering {self.model_name}'
if torch.cuda.device_count() > 1:
self.model = torch.nn.DataParallel(self.model)
self.model = self.model.to(self.device)
else:
assert False, f"Not considering {self.data_name}"
def set_optimizer_scheduler(self):
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.lr, momentum=0.9, weight_decay=5e-4)
self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, self.milestones)
| 7,150 | 41.820359 | 131 | py |
Semi-Online-KD | Semi-Online-KD-master/trainer/sokd.py | import torch
from trainer.vanilla import Vanilla
from utils.utils import accuracy, AverageMeter, save_checkpoint
from kd_losses import SoftTarget
from models import model_dict
class SemiOnlineKnowledgeDistillation(Vanilla):
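    """SOKD: the teacher backbone stays frozen while its auxiliary branch is trained
    online from both teacher and student outputs; the student distils from the branch."""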
def __init__(self, params):
# Model
self.teacher_name = params.get('teacher_name')
self.teacher_depth = params.get('teacher_depth', '')
self.teacher_widen = params.get('teacher_widen', '')
self.teacher_checkpoint = params.get('teacher_checkpoint')
self.teacher = None
# Coefficient
self.lambda_kd = params.get('lambda_kd', 1)
self.lambda_ce = params.get('lambda_ce', 1)
self.auxiliary_lambda_kd_t = params.get('auxiliary_lambda_kd_t', 1)
self.auxiliary_lambda_kd_s = params.get('auxiliary_lambda_kd_s', 1)
self.auxiliary_lambda_ce = params.get('auxiliary_lambda_ce', 1)
self.lr_auxiliary = params.get('lr_auxiliary', 0.05)
self.distillation_name = params.get('distillation_name', 'soft_target')
self.criterion_kd = SoftTarget(T=4)
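        # Index -3 taps the teacher's block2 output (its feature list is [f0..f4]),
        # which feeds the auxiliary copy of block3.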
self.auxiliary_index = -3
self.best_top1_A = 0
experimental_name = f"Teacher-{self.teacher_name}{self.teacher_depth}-{self.teacher_widen}"
super().__init__(params, experimental_name)
def run(self):
self.set_data()
self.set_model()
self.load_teacher()
self.set_optimizer_scheduler()
self.train_model()
def load_teacher(self):
if self.teacher_name == 'wideresnet':
self.teacher = model_dict[f"wrn_{self.teacher_depth}_{self.teacher_widen}"](
num_classes=self.num_classes)
else:
assert False, f'Not considering {self.teacher_name}'
if torch.cuda.device_count() > 1:
self.teacher = torch.nn.DataParallel(self.teacher)
self.teacher = self.teacher.to(self.device)
if self.teacher_checkpoint:
state = torch.load(self.teacher_checkpoint)['model']
teacher_state_dict = self.teacher.state_dict()
loaded_state = {k: v for k, v in state.items() if k in teacher_state_dict}
teacher_state_dict.update(loaded_state)
self.teacher.load_state_dict(teacher_state_dict)
self.logger.info("Load teacher's checkpoint done!")
else:
self.logger.info("No teacher's checkpoint!")
top1, _ = self.evaluation_vanilla(self.teacher)
self.logger.info(f'Teacher ACC: {top1.avg}')
for k, v in self.teacher.named_parameters():
if 'auxiliary' not in k:
v.requires_grad = False
def train(self):
self.model.train()
self.teacher.train()
# log of student
total_loss = AverageMeter()
total_loss_ce = AverageMeter()
total_loss_kd = AverageMeter()
total_top1 = AverageMeter()
total_top5 = AverageMeter()
# log of auxiliary
total_loss_A = AverageMeter()
total_loss_ce_A = AverageMeter()
total_loss_kd_T_A = AverageMeter()
total_loss_kd_S_A = AverageMeter()
total_top1_A = AverageMeter()
total_top5_A = AverageMeter()
for batch_id, (data, targets) in enumerate(self.train_loader):
data = data.to(self.device)
targets = targets.to(self.device)
feature_S, output_S = self.model(data, is_feat=True)
feature_T, output_T = self.teacher(data, is_feat=True)
feature_A, output_A = self.teacher.auxiliary_forward(feature_T[self.auxiliary_index].detach())
# loss of auxiliary
loss_kd_T_A, loss_kd_S_A, loss_kd = self.calculate_kd(self.distillation_name, feature_S, feature_A,
feature_T, output_S, output_A, output_T)
loss_ce_A = self.criterion_ce(output_A, targets) * self.auxiliary_lambda_ce
loss_A = loss_ce_A + loss_kd_T_A + loss_kd_S_A
# loss of student
loss_ce = self.criterion_ce(output_S, targets) * self.lambda_ce
loss = loss_ce + loss_kd
loss_total = loss_A + loss
# accuracy
top1, top5 = accuracy(output_S, targets, topk=(1, 5))
top1_A, top5_A = accuracy(output_A, targets, topk=(1, 5))
# update parameter of student
self.optimizer.zero_grad()
loss_total.backward()
self.optimizer.step()
# update log of student
total_loss.update(loss.item(), data.size(0))
total_loss_ce.update(loss_ce.item(), data.size(0))
total_loss_kd.update(loss_kd.item(), data.size(0))
total_top1.update(top1.item(), data.size(0))
total_top5.update(top5.item(), data.size(0))
# update log of auxiliary
total_loss_A.update(loss_A.item(), data.size(0))
total_loss_ce_A.update(loss_ce_A.item(), data.size(0))
total_loss_kd_T_A.update(loss_kd_T_A.item(), data.size(0))
total_loss_kd_S_A.update(loss_kd_S_A.item(), data.size(0))
total_top1_A.update(top1_A.item(), data.size(0))
total_top5_A.update(top5_A.item(), data.size(0))
info_str = f"Train (Branch)=> loss_ce: {total_loss_ce_A.avg:.4f}, loss_kd_T_A: {total_loss_kd_T_A.avg:.4f}," \
f"loss_kd_S_A: {total_loss_kd_S_A.avg:.4f}, prec@1: {total_top1_A.avg:.2f}, prec@5: {total_top5_A.avg:.2f}"
self.logger.info(info_str)
info_str = f"Train (Student)=> loss_ce: {total_loss_ce.avg:.4f}, loss_kd: {total_loss_kd.avg:.4f}, " \
f"prec@1: {total_top1.avg:.2f}, prec@5: {total_top5.avg:.2f}"
self.logger.info(info_str)
return total_top1, total_top5
@torch.no_grad()
def evaluation(self, model):
model.eval()
self.teacher.eval()
total_top1 = AverageMeter()
total_top5 = AverageMeter()
total_top1_t = AverageMeter()
total_top5_t = AverageMeter()
for batch_id, (data, targets) in enumerate(self.test_loader):
data = data.to(self.device)
targets = targets.to(self.device)
output_S = model(data)
feature_T, output_T = self.teacher(data, is_feat=True)
_, output_A = self.teacher.auxiliary_forward(feature_T[self.auxiliary_index].detach())
top1, top5 = accuracy(output_S, targets, topk=(1, 5))
total_top1.update(top1.item(), data.size(0))
total_top5.update(top5.item(), data.size(0))
top1_t, top5_t = accuracy(output_A, targets, topk=(1, 5))
total_top1_t.update(top1_t.item(), data.size(0))
total_top5_t.update(top5_t.item(), data.size(0))
if total_top1_t.avg > self.best_top1_A:
self.best_top1_A = total_top1_t.avg
state_dict = {'model': self.teacher.state_dict()}
save_checkpoint(state_dict, True, f"{self.save_folder}/teacher")
self.logger.info(
f"Test (branch)=> lr:{self.optimizer.param_groups[1]['lr']}, "
f"top1_A:{total_top1_t.avg:.2f}, top5_A:{total_top5_t.avg:.2f}, @Best: {self.best_top1_A}")
return total_top1, total_top5
def set_optimizer_scheduler(self):
self.optimizer = torch.optim.SGD([{'params': self.model.parameters()},
{'params': self.teacher.parameters(), 'lr': self.lr_auxiliary}],
lr=self.lr, momentum=0.9, weight_decay=5e-4)
self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, self.milestones)
def calculate_kd(self, name, feature_S, feature_A, feature_T, output_S, output_A, output_T):
if name == 'soft_target':
loss_kd_T_A = self.criterion_kd(output_A, output_T.detach()) * self.auxiliary_lambda_kd_t
loss_kd_S_A = self.criterion_kd(output_A, output_S.detach()) * self.auxiliary_lambda_kd_s
loss_S = self.criterion_kd(output_S, output_A.detach()) * self.lambda_kd
        else:
            raise NotImplementedError(f"Not considering {name}")
return loss_kd_T_A, loss_kd_S_A, loss_S
| 8,214 | 46.212644 | 126 | py |
Semi-Online-KD | Semi-Online-KD-master/models/wrn.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from copy import deepcopy
__all__ = ['wrn']
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(nb_layers):
layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
n = (depth - 4) // 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
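        # Auxiliary branch: deep copies of the final stage (block3, bn, fc) that can be
        # trained independently (used by SOKD) while the main backbone stays frozen.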
self.auxiliary_block = nn.Sequential(
deepcopy(self.block3)
)
self.auxiliary_bn1 = deepcopy(self.bn1)
self.auxiliary_fc = deepcopy(self.fc)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def get_feat_modules(self):
feat_m = nn.ModuleList([])
feat_m.append(self.conv1)
feat_m.append(self.block1)
feat_m.append(self.block2)
feat_m.append(self.block3)
return feat_m
def get_bn_before_relu(self):
bn1 = self.block2.layer[0].bn1
bn2 = self.block3.layer[0].bn1
bn3 = self.bn1
return [bn1, bn2, bn3]
def forward(self, x, is_feat=False, preact=False):
out = self.conv1(x)
f0 = out
out = self.block1(out)
f1 = out
out = self.block2(out)
f2 = out
out = self.block3(out)
f3 = out
out = self.relu(self.bn1(out))
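        # 8x8 average pooling assumes 32x32 inputs (CIFAR-style).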
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
f4 = out
out = self.fc(out)
if is_feat:
if preact:
f1 = self.block2.layer[0].bn1(f1)
f2 = self.block3.layer[0].bn1(f2)
f3 = self.bn1(f3)
return [f0, f1, f2, f3, f4], out
else:
return out
def auxiliary_forward(self, feat):
out = self.auxiliary_block(feat)
f0 = out
out = self.relu(self.auxiliary_bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
f1 = out
out = self.auxiliary_fc(out)
return [f0, f1], out
def wrn(**kwargs):
"""
    Constructs a Wide Residual Network.
"""
model = WideResNet(**kwargs)
return model
def wrn_40_2(**kwargs):
model = WideResNet(depth=40, widen_factor=2, **kwargs)
return model
def wrn_40_1(**kwargs):
model = WideResNet(depth=40, widen_factor=1, **kwargs)
return model
| 5,436 | 33.411392 | 116 | py |
Semi-Online-KD | Semi-Online-KD-master/kd_losses/st.py | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
class SoftTarget(nn.Module):
'''
Distilling the Knowledge in a Neural Network
https://arxiv.org/pdf/1503.02531.pdf
'''
def __init__(self, T):
super(SoftTarget, self).__init__()
self.T = T
def forward(self, out_s, out_t):
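        # KL divergence between temperature-softened distributions; the T*T factor
        # compensates for the 1/T^2 gradient scaling (Hinton et al., 2015).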
loss = F.kl_div(F.log_softmax(out_s/self.T, dim=1),
F.softmax(out_t/self.T, dim=1),
reduction='batchmean') * self.T * self.T
return loss | 563 | 23.521739 | 53 | py |
Semi-Online-KD | Semi-Online-KD-master/utils/utils.py | import logging
import colorlog
import os
import time
import shutil
import torch
import random
import numpy as np
from shutil import copyfile
def create_logger():
"""
Setup the logging environment
"""
log = logging.getLogger() # root logger
log.setLevel(logging.DEBUG)
format_str = '%(asctime)s - %(levelname)-8s - %(message)s'
date_format = '%Y-%m-%d %H:%M:%S'
if os.isatty(2):
cformat = '%(log_color)s' + format_str
colors = {'DEBUG': 'reset',
'INFO': 'reset',
'WARNING': 'bold_yellow',
'ERROR': 'bold_red',
'CRITICAL': 'bold_red'}
formatter = colorlog.ColoredFormatter(cformat, date_format,
log_colors=colors)
else:
formatter = logging.Formatter(format_str, date_format)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
log.addHandler(stream_handler)
return logging.getLogger(__name__)
class TimeRecorder(object):
"""
    Record training time.
"""
def __init__(self, start_epoch, epochs, logger):
self.total_time = 0.
self.remaining_time = 0.
self.epochs = epochs
self.start_epoch = start_epoch
self.logger = logger
self.start_time = time.time()
def update(self):
now_time = time.time()
elapsed_time = now_time - self.start_time
self.start_time = now_time
self.total_time += elapsed_time
self.remaining_time = elapsed_time * (self.epochs - self.start_epoch)
self.start_epoch += 1
self.logger.info(f'Cost time=>{self.format_time(self.total_time)}')
self.logger.info(f'Remaining time=>{self.format_time(self.remaining_time)}')
@staticmethod
def format_time(time):
        h = int(time // 3600)
        m = int((time % 3600) // 60)
        s = (time % 3600) % 60
return f'{h}h{m}m{s:.2f}s'
def output_process(output_path):
if os.path.exists(output_path):
print("{} file exist!".format(output_path))
        action = input("Select Action: d (delete) / q (quit):").lower().strip()
        if action == 'd':
shutil.rmtree(output_path)
else:
raise OSError("Directory {} exits!".format(output_path))
if not os.path.exists(output_path):
os.makedirs(output_path)
def save_code(src, dst, exclude=()):
"""
Save experimental codes.
"""
for f in os.listdir(src):
# Do not save experimental results
if f in exclude:
continue
src_file = os.path.join(src, f)
file_split = f.split(".")
if len(file_split) >= 2:
if not os.path.isdir(dst):
os.makedirs(dst)
dst_file = os.path.join(dst, f)
            try:
                shutil.copyfile(src=src_file, dst=dst_file)
            except Exception:
                print("Copy file error! src: {}, dst: {}".format(src_file, dst_file))
elif os.path.isdir(src_file):
deeper_dst = os.path.join(dst, f)
save_code(src_file, deeper_dst)
def accuracy(output, target, topk=(1,)):
    """Return top-k accuracies (in percent) for every k in `topk`."""
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape instead of view: the sliced tensor is not contiguous on newer torch
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
class AverageMeter(object):
"""
Keeps track of most recent, average, sum, and count of a metric.
"""
    def __init__(self):
        self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save_opts(opts, save_path='.'):
with open(f"{save_path}/opts.txt", 'w') as f:
for k, v in opts.items():
f.write(str(k) + ": " + str(v) + '\n')
def save_checkpoint(state_dict, is_best, folder_name='.'):
if not os.path.exists(folder_name):
os.makedirs(folder_name)
checkpoint_name = f"{folder_name}/checkpoint.pth.tar"
torch.save(state_dict, checkpoint_name)
if is_best:
model_name = f"{folder_name}/best_model.pth.tar"
copyfile(checkpoint_name, model_name)
def fix_random(seed=0):
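    # Seed Python, NumPy and CUDA RNGs; deterministic cuDNN trades speed for reproducibility.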
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
return True
def count_parameters_in_MB(model):
return sum(np.prod(v.size()) for name, v in model.named_parameters()) / 1e6
| 4,987 | 27.340909 | 85 | py |
Simplified_DMC | Simplified_DMC-master/location_dmc.py | import argparse
import os
import torch
from torch.utils.data import DataLoader
from torch import optim
import numpy as np
from data.MUSIC_dataset import MUSIC_Dataset, MUSIC_AV_Classify
from model.base_model import resnet18
from model.dmc_model import DMC_NET
from sklearn import cluster, metrics
from sklearn.preprocessing import normalize
from torch import nn
import torch.nn.functional as F
import pickle
def batch_organize(audio_data, posi_img_data, nega_img_data, posi_label, nega_label):
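    # Interleave pairs: even rows hold the matched (audio, image) pair with label 1,
    # odd rows the mismatched pair with label 0.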
batch_audio_data = torch.zeros(audio_data.shape[0] * 2, audio_data.shape[1], audio_data.shape[2],
audio_data.shape[3])
batch_image_data = torch.zeros(posi_img_data.shape[0] * 2, posi_img_data.shape[1], posi_img_data.shape[2],
posi_img_data.shape[3])
batch_labels = torch.zeros(audio_data.shape[0] * 2)
class_labels = torch.zeros(audio_data.shape[0] * 2)
for i in range(audio_data.shape[0]):
batch_audio_data[i * 2, :] = audio_data[i, :]
batch_audio_data[i * 2 + 1, :] = audio_data[i, :]
batch_image_data[i * 2, :] = posi_img_data[i, :]
batch_image_data[i * 2 + 1, :] = nega_img_data[i, :]
batch_labels[i * 2] = 1
batch_labels[i * 2 + 1] = 0
class_labels[i * 2] = posi_label[i]
class_labels[i * 2 + 1] = nega_label[i]
return batch_audio_data, batch_image_data, batch_labels, class_labels
def eva_metric2(predict, gt, pair_num=2):
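    # `predict` holds distances, so a sample counts as correct only when its
    # positive pair scores strictly below all of its negatives.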
num = int(predict.shape[0]/pair_num)
correct = 0
for i in range(num):
pos = predict[pair_num*i]
flag = True
for j in range(pair_num-1):
neg = predict[pair_num*i+j+1]
if pos >= neg:
flag = False
        if flag:
correct += 1
return correct / num
class ContrastiveLoss(nn.Module):
"""
Contrastive loss
Takes embeddings of two samples and a target label == 1 if samples are from the same class and label == 0 otherwise
"""
def __init__(self, margin=5.):
super(ContrastiveLoss, self).__init__()
self.margin = margin
self.eps = 1e-9
def forward(self, output, target, size_average=True):
distances = output.pow(2).sum(1) # squared distances
losses = 0.5 * (target.float() * distances +
(1 + -1 * target).float() * F.relu(self.margin - (distances + self.eps).sqrt()).pow(2))
return losses.mean() if size_average else losses.sum()
def location_model_train(model, data_loader, optimizer, criterion):
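    # One epoch over audio-image pairs: the contrastive loss pulls matched pairs
    # together and pushes mismatched pairs at least `margin` apart.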
model.train()
accs = 0
count = 0
losses = 0
for i, data in enumerate(data_loader, 0):
if i % 200 == 0:
print('location batch:%d' % i)
audio_data, posi_img_data, nega_img_data, posi_label, nega_label, _, _ = data
audio_data, image_data, av_labels, class_labels = batch_organize(audio_data, posi_img_data, nega_img_data, posi_label, nega_label)
audio_data, image_data, av_labels = audio_data.type(torch.FloatTensor).cuda(), \
image_data.type(torch.FloatTensor).cuda(), \
av_labels.type(torch.FloatTensor).cuda()
optimizer.zero_grad()
av_outputs, _, _ = model(image_data, audio_data)
loss = criterion(av_outputs, av_labels)
loss.backward()
optimizer.step()
losses += loss.detach().cpu().numpy()
# acc = eva_metric2(av_outputs.detach().cpu().numpy(), av_labels.cpu().numpy())
# accs += acc
count += 1
print('location loss is %.3f ' % (losses / count))
return accs / count
def location_model_eva(model, data_loader):
model.eval()
accs = 0
num = len(data_loader.dataset)
count = 0
results = {}
with torch.no_grad():
for i, data in enumerate(data_loader, 0):
audio_data, posi_img_data, nega_img_data, posi_label, nega_label, img_path, _ = data
audio_data, image_data, av_labels, class_labels = batch_organize(audio_data, posi_img_data, nega_img_data,
posi_label, nega_label)
audio_data, image_data = audio_data.type(torch.FloatTensor).cuda(), image_data.type(torch.FloatTensor).cuda()
av_outputs, av_maps, av_dists = model(image_data, audio_data)
obj_localization = av_maps.detach().cpu().numpy()
obj_localization = obj_localization[::2]
av_dists = av_dists[::2]
# accs += eva_metric2(av_outputs.detach().cpu().numpy(), av_labels.numpy())
count += 1
_, idx = torch.sort(av_dists, dim=1)
idx = idx[:, 1].detach().cpu().numpy()
for k in range(len(img_path)):
results[img_path[k][:-4]] = obj_localization[k]
pickle.dump(results, open('dmc.pkl', 'wb'))
return accs / count
def main():
parser = argparse.ArgumentParser(description='AID_PRETRAIN')
parser.add_argument('--data_list_dir', type=str,
default='./data/data_indicator/music/solo')
parser.add_argument('--data_dir', type=str, default='/home/ruiq/Music/solo')
parser.add_argument('--mode', type=str, default='train', help='train/val/test')
parser.add_argument('--json_file', type=str,default='./data/MUSIC_label/MUSIC_solo_videos.json')
parser.add_argument('--use_pretrain', type=int, default=0, help='whether to init from ckpt')
parser.add_argument('--ckpt_file', type=str, default='location_net_009_0.665.pth', help='pretrained model name')
parser.add_argument('--enable_img_augmentation', type=int, default=1, help='whether to augment input image')
parser.add_argument('--enable_audio_augmentation', type=int, default=1, help='whether to augment input audio')
parser.add_argument('--batch_size', type=int, default=32, help='training batch size')
    parser.add_argument('--learning_rate', type=float, default=1e-4, help='initial learning rate')
parser.add_argument('--epoch', type=int, default=100, help='training epoch')
parser.add_argument('--gpu_ids', type=str, default='[0,1,2,3]', help='USING GPU IDS e.g.\'[0,4]\'')
parser.add_argument('--num_threads', type=int, default=4, help='number of threads')
parser.add_argument('--seed', type=int, default=10)
parser.add_argument('--evaluate', type=int, default=0, help='only evaluate or not')
parser.add_argument('--v_cluster', type=int, default=2, help='number of visual cluster')
parser.add_argument('--a_cluster', type=int, default=1, help='number of audio cluster')
args = parser.parse_args()
train_list_file = os.path.join(args.data_list_dir, 'solo_training_1.txt')
val_list_file = os.path.join(args.data_list_dir, 'solo_validation.txt')
test_list_file = os.path.join(args.data_list_dir, 'solo_testing.txt')
train_dataset = MUSIC_Dataset(args.data_dir, train_list_file, args)
val_dataset = MUSIC_Dataset(args.data_dir, val_list_file, args)
test_dataset = MUSIC_Dataset(args.data_dir, test_list_file, args)
train_dataloader = DataLoader(dataset=train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.num_threads)
val_dataloader = DataLoader(dataset=val_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.num_threads)
test_dataloader = DataLoader(dataset=test_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.num_threads)
# net setup
visual_backbone = resnet18(modal='vision',pretrained=False)
audio_backbone = resnet18(modal='audio')
av_model = DMC_NET(visual_net=visual_backbone, audio_net=audio_backbone, v_cluster_num=args.v_cluster, a_cluster_num=args.a_cluster)
if args.use_pretrain:
PATH = args.ckpt_file
state = torch.load(PATH)
av_model.load_state_dict(state, strict=False)
av_model_cuda = av_model.cuda()
loss_func = ContrastiveLoss()
optimizer = optim.Adam(params=av_model_cuda.parameters(), lr=args.learning_rate, betas=(0.9, 0.999),
weight_decay=0.0001)
if args.evaluate:
eva_location_acc = location_model_eva(av_model_cuda, test_dataloader)
return
for e in range(0, args.epoch):
print('Epoch is %03d' % e)
train_location_acc = location_model_train(av_model_cuda, train_dataloader, optimizer, loss_func)
eva_location_acc = location_model_eva(av_model_cuda, test_dataloader)
print('train acc is %.3f, val acc is %.3f' % (train_location_acc, eva_location_acc))
if e % 3 == 0:
PATH = 'ckpt/dmc/dmc_stage_one_%03d_%.3f.pth' % (e, eva_location_acc)
torch.save(av_model_cuda.state_dict(), PATH)
if __name__ == '__main__':
main()
| 8,957 | 41.254717 | 138 | py |
Simplified_DMC | Simplified_DMC-master/data/pair_video_audio.py | import os
import pdb
audio_dir = './MUSIC/solo/audio'
video_dir = './MUSIC/solo/video'
all_audios = os.listdir(audio_dir)
audios = [ audio for audio in all_audios if audio.endswith('.flac')]
all_videos = os.listdir(video_dir)
videos = [video for video in all_videos if video.endswith('.mp4')]
with open('solo_pairs.txt', 'w') as fid:
    for each in audios:
        video = each.replace('.flac', '.mp4')
        if video in videos:
            fid.write(each + ' ' + video + '\n')
| 469 | 22.5 | 69 | py |
Simplified_DMC | Simplified_DMC-master/data/MUSIC_dataset.py | import numpy as np
import librosa
from PIL import Image, ImageEnhance
import pickle
import random
import os
import torchvision.transforms as transforms
import json
import torch
def augment_image(image):
if(random.random() < 0.5):
image = image.transpose(Image.FLIP_LEFT_RIGHT)
enhancer = ImageEnhance.Brightness(image)
image = enhancer.enhance(random.random()*0.6 + 0.7)
enhancer = ImageEnhance.Color(image)
image = enhancer.enhance(random.random()*0.6 + 0.7)
return image
class MUSIC_Dataset(object):
def __init__(self, data_root, data_list_file, opt):
# self.root = root
# root = '/mnt/scratch/hudi/MUSIC/solo'
self.opt = opt
self.audio_root = os.path.join(data_root, 'audio_frames')
self.video_root = os.path.join(data_root, 'video_frames')
with open(data_list_file,'r') as fid:
pairs = [line.strip().split(' ') for line in fid.readlines()]
self.sample_label= self._parse_csv(self.opt.json_file)
self.audio_list = []
self.video_list = []
self.label_list = []
for each in pairs:
audio = each[0]
video = each[1]
assert audio[:-5] == video[:-4]
audio_path = os.path.join(self.audio_root, audio[:-5])
video_path = os.path.join(self.video_root, video[:-4])
audio_samples= os.listdir(audio_path)
for item in range(len(audio_samples)):
audio_segment = audio_samples[item]
video_segment = os.path.join(video_path, 'frame_'+audio_segment[:3])
if os.path.exists(video_segment):
self.audio_list.append(os.path.join(audio_path, audio_segment))
self.video_list.append(os.path.join(video_path, video_segment))
if self.opt.mode == 'val' or self.opt.mode == 'test':
img_transform_list = [transforms.Resize((224,224)), transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]
else:
img_transform_list = [transforms.Resize((256, 256)), transforms.RandomCrop(224), transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]
self.img_transform = transforms.Compose(img_transform_list)
#self.audio_transform = audio_transform
def __len__(self):
return len(self.audio_list)
def _parse_csv(self, json_file):
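        # Despite the name, this parses the MUSIC json annotation and maps each
        # video id to the index of its instrument class.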
        with open(json_file, encoding='utf-8') as f:
            content = f.read()
ins_indicator = json.loads(content)
ins_indicator = ins_indicator['videos']
ins_list = [*ins_indicator]
sample_label = {}
pickle.dump(ins_list, open('keylist.pkl', 'wb'))
for i in range(len(ins_list)):
current_list = ins_indicator[ins_list[i]]
for j in range(len(current_list)):
sample_label[current_list[j]] = i
return sample_label
def __getitem__(self, index):
# positive
cur_audio_segment = self.audio_list[index]
posi_video_segment = self.video_list[index]
if self.opt.mode == 'train':
posi_video_segment_img = random.choice(os.listdir(posi_video_segment))
else:
posi_video_segment_img = os.listdir(posi_video_segment)[0]
# load data
with open(cur_audio_segment, 'rb') as fid:
cur_audio_data = pickle.load(fid)
cur_audio_data = np.expand_dims(cur_audio_data, 0)
posi_img_path = os.path.join(posi_video_segment, posi_video_segment_img)
posi_img = Image.open(posi_img_path)
if(self.opt.enable_img_augmentation and self.opt.mode == 'train'):
posi_img = augment_image(posi_img)
posi_img = self.img_transform(posi_img)
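        # The 11-character video id sits at a fixed offset inside the segment path.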
posi_label = self.sample_label[posi_video_segment[-28:-17]]
# TODO: here may need normalization
# negative
        while True:
nega_video_segment = random.choice(self.video_list)
if nega_video_segment[-28:-17] != posi_video_segment[-28:-17]:
break
nega_video_segment_img = random.choice(os.listdir(nega_video_segment))
nega_img_path = os.path.join(nega_video_segment, nega_video_segment_img)
nega_img = Image.open(nega_img_path)
if(self.opt.enable_img_augmentation and self.opt.mode == 'train'):
nega_img = augment_image(nega_img)
nega_img = self.img_transform(nega_img)
nega_label = self.sample_label[nega_video_segment[-28:-17]]
if self.opt.mode == 'train':
return cur_audio_data, posi_img, nega_img, posi_label, nega_label, posi_video_segment, cur_audio_segment
return cur_audio_data, posi_img, nega_img, posi_label, nega_label, posi_img_path, cur_audio_segment
class MUSIC_Dataset_(object):
def __init__(self, data_root, data_list_file, opt):
# self.root = root
# root = '/mnt/scratch/hudi/MUSIC/solo'
self.opt = opt
if self.opt.mode == 'train':
self.audio_root = '/home/yuxi/ruiq/AudioVisual/multiple-sound-source-localization/synthesize/train/audio'
self.video_root = '/home/yuxi/ruiq/AudioVisual/multiple-sound-source-localization/synthesize/train/video'
else:
self.audio_root = '/home/yuxi/ruiq/AudioVisual/multiple-sound-source-localization/synthesize/test/audio'
self.video_root = '/home/yuxi/ruiq/AudioVisual/multiple-sound-source-localization/synthesize/test/video'
self.box_root = '/home/yuxi/ruiq/AudioVisual/multiple-sound-source-localization/synthesize/test/box'
self.audio_list = os.listdir(self.audio_root)
self.video_list = os.listdir(self.video_root)
self.box_list = os.listdir(self.box_root)
self.audio_list.sort()
self.video_list.sort()
self.box_list.sort()
assert len(self.audio_list) == len(self.video_list)
if self.opt.mode == 'val' or self.opt.mode == 'test':
img_transform_list = [transforms.Resize((224,224)), transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]
else:
img_transform_list = [transforms.Resize((256, 256)), transforms.RandomCrop(224), transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]
self.img_transform = transforms.Compose(img_transform_list)
def __len__(self):
return len(self.audio_list)
def __getitem__(self, index):
# positive
cur_audio_segment = self.audio_list[index]
posi_video_segment = self.video_list[index]
if self.opt.mode == 'val':
box_segment = self.box_list[index]
# load data
with open(os.path.join(self.audio_root, cur_audio_segment), 'rb') as fid:
cur_audio_data = pickle.load(fid)
cur_audio_data = np.expand_dims(cur_audio_data, 0)
posi_img_path = os.path.join(self.video_root, posi_video_segment)
posi_img = Image.open(posi_img_path)
if(self.opt.enable_img_augmentation and self.opt.mode == 'train'):
posi_img = augment_image(posi_img)
posi_img = self.img_transform(posi_img)
        while True:
nega_video_segment = random.choice(self.video_list)
if nega_video_segment != posi_video_segment:
break
nega_img_path = os.path.join(self.video_root, nega_video_segment)
nega_img = Image.open(nega_img_path)
if(self.opt.enable_img_augmentation and self.opt.mode == 'train'):
nega_img = augment_image(nega_img)
nega_img = self.img_transform(nega_img)
if self.opt.mode == 'val':
box = np.load(os.path.join(self.box_root, box_segment))
return cur_audio_data, posi_img, nega_img, torch.tensor(0), torch.tensor(0), torch.tensor(0), box
return cur_audio_data, posi_img, nega_img, torch.tensor(0), torch.tensor(0), torch.tensor(0), torch.tensor(0)
class MUSIC_AV_Classify(object):
def __init__(self, video_dirs, aud_dirs, label, opt):
self.opt = opt
self.video_dirs = video_dirs
self.aud_dirs = aud_dirs
self.label = label
if self.opt.mode == 'val' or self.opt.mode == 'test':
img_transform_list = [transforms.Resize((224,224)), transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]
else:
img_transform_list = [transforms.Resize((256, 256)), transforms.RandomCrop(224), transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]
self.img_transform = transforms.Compose(img_transform_list)
def __len__(self):
return len(self.video_dirs)
def __getitem__(self, index):
video_segment_img = random.choice(os.listdir(self.video_dirs[index]))
img_path = os.path.join(self.video_dirs[index], video_segment_img)
img = Image.open(img_path)
if(self.opt.enable_img_augmentation and self.opt.mode == 'train'):
img = augment_image(img)
img_data = self.img_transform(img)
with open(self.aud_dirs[index], 'rb') as fid:
cur_audio_data = pickle.load(fid)
audio_data = np.expand_dims(cur_audio_data, 0)
if self.opt.mode == 'val' or self.opt.mode == 'test':
return audio_data, img_data
else:
return audio_data, img_data, self.label[index] | 9,784 | 42.29646 | 117 | py |
Simplified_DMC | Simplified_DMC-master/data/base_sampler.py | import torch
from torch.utils.data.sampler import Sampler
# Runnable skeleton (the original body was incomplete); `data_source` is an
# assumed argument, and subclasses are expected to override __iter__.
class BaseSampler(Sampler):
    def __init__(self, data_source):
        super(BaseSampler, self).__init__(data_source)
        self.data_source = data_source

    def __len__(self):
        return len(self.data_source)

    def __iter__(self):
        return iter(range(len(self.data_source)))
| 203 | 17.545455 | 44 | py |
Simplified_DMC | Simplified_DMC-master/data/cut_audios.py | import numpy as np
import librosa
import pickle
import os
import pdb
with open('data_indicator/music/solo/solo_pairs.txt','r') as fid:
audios = [line.strip().split(' ')[0] for line in fid.readlines()]
audio_dir = './MUSIC/solo/audio'
save_dir = './MUSIC/solo/audio_frames'
#def audio_extract(wav_name, sr=22000):
def audio_extract(wav_name, sr=16000):
#pdb.set_trace()
wav_file = os.path.join(audio_dir, wav_name)
save_path = os.path.join(save_dir, wav_name[:-5])
if not os.path.exists(save_path):
os.mkdir(save_path)
wav, cur_sr = librosa.load(wav_file, sr=sr)
if cur_sr !=sr:
pdb.set_trace()
secs = int(len(wav)/sr)
print(secs)
for i in range(secs):
start = sr * i
end = sr * (i+1)
cur_wav = wav[start:end]
#spec = librosa.core.stft(cur_wav, n_fft=0.01*sr, hop_length=0.005*sr,
# window='hann', center=True, pad_mode='constant')
spec = librosa.core.stft(cur_wav, n_fft=160, hop_length=80,
window='hann', center=True, pad_mode='constant')
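        # 160-sample window / 80-sample hop at 16 kHz (10 ms / 5 ms) gives
        # 201 centered STFT frames per one-second clip.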
#mel = librosa.feature.melspectrogram(S = np.abs(spec), sr=sr, n_mels=256, fmax=sr/2)
mel = librosa.feature.melspectrogram(S = np.abs(spec), sr=sr, n_mels=64, fmax=sr/2)
log_mel = librosa.core.power_to_db(mel)
log_mel_T = log_mel.T.astype('float32')
assert log_mel_T.shape == (201,64)
#pdb.set_trace()
save_name = os.path.join(save_path, '{:03d}.pkl'.format(i))
#print(save_name)
with open(save_name, 'wb') as fid:
pickle.dump(log_mel_T, fid)
for audio in audios:
print(audio)
audio_extract(audio)
#pdb.set_trace()
| 1,685 | 33.408163 | 93 | py |
Simplified_DMC | Simplified_DMC-master/data/data_split.py | import os
import json
solo_videos = './MUSIC_label/MUSIC_solo_videos.json'
solo_videos = json.load(open(solo_videos, 'r'))
solo_videos = solo_videos['videos']
trains = []
vals = []
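# The first five videos of each instrument class go to validation, the rest to training.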
for _, item in solo_videos.items():
for i, vid in enumerate(item):
if i < 5:
vals.append(vid)
else:
trains.append(vid)
videos = open('./data_indicator/music/solo/solo_pairs.txt', 'r')
train_file = open('./data_indicator/music/solo/solo_training.txt', 'w')
val_file = open('./data_indicator/music/solo/solo_validation.txt', 'w')
while True:
pair = videos.readline()
if len(pair) == 0:
break
vid = pair.split(' ')[0][:-12]
if vid in trains:
train_file.write(pair)
elif vid in vals:
val_file.write(pair)
videos.close()
train_file.close()
val_file.close() | 825 | 24.030303 | 71 | py |
Simplified_DMC | Simplified_DMC-master/data/cut_videos.py | import os
import cv2
import pdb
def video2frame(video_path, frame_save_path, frame_interval=1):
vid = cv2.VideoCapture(video_path)
fps = vid.get(cv2.CAP_PROP_FPS)
#pdb.set_trace()
success, image = vid.read()
count = 0
while success:
count +=1
if count % frame_interval == 0:
#cv2.imencode('.png', image)[1].tofile(frame_save_path+'/fame_%d.png'%count)
save_name = '{}/frame_{}_{}.jpg'.format(frame_save_path, int(count/fps),count)
cv2.imencode('.jpg', image)[1].tofile(save_name)
success, image = vid.read()
print(count)
def video2frame_update(video_path, frame_save_path, frame_kept_per_second=4):
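    # Keep `frame_kept_per_second` evenly spaced frames from every second of video,
    # grouping them into one frame_XXX/ directory per second.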
vid = cv2.VideoCapture(video_path)
fps = vid.get(cv2.CAP_PROP_FPS)
video_frames = vid.get(cv2.CAP_PROP_FRAME_COUNT)
video_len = int(video_frames/fps)
print(video_len)
count = 0
frame_interval = int(fps/frame_kept_per_second)
while(count < fps*video_len):
ret, image = vid.read()
if not ret:
break
if count % fps == 0:
frame_id = 0
if frame_id<frame_interval*frame_kept_per_second and frame_id%frame_interval == 0:
#cv2.imencode('.png', image)[1].tofile(frame_save_path+'/fame_%d.png'%count)
save_dir = '{}/frame_{:03d}'.format(frame_save_path, int(count/fps))
if not os.path.exists(save_dir):
os.mkdir(save_dir)
save_name = '{}/frame_{:03d}/{:05d}.jpg'.format(frame_save_path, int(count/fps), count)
cv2.imencode('.jpg', image)[1].tofile(save_name)
frame_id += 1
count += 1
video_dir = './MUSIC/solo/video'
#videos = os.listdir(video_dir)
with open('data_indicator/music/solo/solo_pairs.txt','r') as fid:
videos = [line.strip().split(' ')[1] for line in fid.readlines()]
save_dir = './MUSIC/solo/video_frames'
if not os.path.exists(save_dir):
os.mkdir(save_dir)
vid_count = 0
for each_video in videos:
if not each_video.endswith('.mp4'):
continue
print(each_video)
video_path = os.path.join(video_dir, each_video)
save_path = os.path.join(save_dir, each_video[:-4])
if not os.path.exists(save_path):
os.mkdir(save_path)
    video2frame_update(video_path, save_path, frame_kept_per_second=4)
    vid_count += 1
    #pdb.set_trace()
print('cut %d videos' % vid_count)
| 2,377 | 32.492958 | 99 | py |
Simplified_DMC | Simplified_DMC-master/model/base_model.py | import torch
import torch.nn as nn
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, modal, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.modal = modal
self.groups = groups
self.base_width = width_per_group
self.conv1_a = nn.Conv2d(1, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
dilate=replace_stride_with_dilation[2])
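        # NOTE: layer4 above uses stride 1 (standard ResNet-18 uses 2), keeping a larger feature map.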
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
if self.modal == 'audio':
x = self.conv1_a(x)
else:
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
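        # avgpool/fc are defined but intentionally skipped: callers consume the spatial map.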
return x
def forward(self, x):
return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, modal, **kwargs):
model = ResNet(block, layers, modal, **kwargs)
if pretrained:
print('load pretrained res-18')
model.load_state_dict(torch.load('../resnet18-5c106cde.pth'), strict=False)
return model
def resnet18(pretrained=False, progress=True, modal='vision',**kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, modal, **kwargs)
| 9,147 | 38.261803 | 106 | py |
Simplified_DMC | Simplified_DMC-master/model/audio_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Unet(nn.Module):
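    # U-Net over 1-channel spectrograms: `num_downs` encoder/decoder levels,
    # `fc_dim` output channels.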
def __init__(self, fc_dim=64, num_downs=5, ngf=64, use_dropout=False):
super(Unet, self).__init__()
# construct unet structure
unet_block = UnetBlock(
ngf * 8, ngf * 8, input_nc=None,
submodule=None, innermost=True)
for i in range(num_downs - 5):
unet_block = UnetBlock(
ngf * 8, ngf * 8, input_nc=None,
submodule=unet_block, use_dropout=use_dropout)
unet_block = UnetBlock(
ngf * 4, ngf * 8, input_nc=None,
submodule=unet_block)
unet_block = UnetBlock(
ngf * 2, ngf * 4, input_nc=None,
submodule=unet_block)
unet_block = UnetBlock(
ngf, ngf * 2, input_nc=None,
submodule=unet_block)
unet_block = UnetBlock(
fc_dim, ngf, input_nc=1,
submodule=unet_block, outermost=True)
self.bn0 = nn.BatchNorm2d(1)
self.unet_block = unet_block
def forward(self, x):
x = self.bn0(x)
x = self.unet_block(x)
return x
# Defines the submodule with skip connection.
# |-- downsampling -- |submodule| -- upsampling --|
class UnetBlock(nn.Module):
def __init__(self, outer_nc, inner_input_nc, input_nc=None,
submodule=None, outermost=False, innermost=False,
use_dropout=False, inner_output_nc=None, noskip=False):
super(UnetBlock, self).__init__()
self.outermost = outermost
self.noskip = noskip
use_bias = False
if input_nc is None:
input_nc = outer_nc
if innermost:
inner_output_nc = inner_input_nc
elif inner_output_nc is None:
inner_output_nc = 2 * inner_input_nc
downrelu = nn.LeakyReLU(0.2, True)
downnorm = nn.BatchNorm2d(inner_input_nc)
uprelu = nn.ReLU(True)
upnorm = nn.BatchNorm2d(outer_nc)
upsample = nn.Upsample(
scale_factor=2, mode='bilinear', align_corners=True)
if outermost:
downconv = nn.Conv2d(
input_nc, inner_input_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
upconv = nn.Conv2d(
inner_output_nc, outer_nc, kernel_size=3, padding=1)
down = [downconv]
up = [uprelu, upsample, upconv]
model = down + [submodule] + up
elif innermost:
downconv = nn.Conv2d(
input_nc, inner_input_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
upconv = nn.Conv2d(
inner_output_nc, outer_nc, kernel_size=3,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upsample, upconv, upnorm]
model = down + up
else:
downconv = nn.Conv2d(
input_nc, inner_input_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
upconv = nn.Conv2d(
inner_output_nc, outer_nc, kernel_size=3,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upsample, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost or self.noskip:
return self.model(x)
else:
return torch.cat([x, self.model(x)], 1)
| 3,744 | 33.675926 | 74 | py |
Simplified_DMC | Simplified_DMC-master/model/vision_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Resnet(nn.Module):
def __init__(self, original_resnet):
super(Resnet, self).__init__()
self.features = nn.Sequential(
*list(original_resnet.children())[:-1])
# for param in self.features.parameters():
# param.requires_grad = False
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), x.size(1))
return x
class ResnetFC(nn.Module):
def __init__(self, original_resnet, fc_dim=64,
pool_type='maxpool', conv_size=3):
super(ResnetFC, self).__init__()
self.pool_type = pool_type
self.features = nn.Sequential(
*list(original_resnet.children())[:-2])
self.fc = nn.Conv2d(
512, fc_dim, kernel_size=conv_size, padding=conv_size//2)
def forward(self, x, pool=True):
x = self.features(x)
x = self.fc(x)
if not pool:
return x
if self.pool_type == 'avgpool':
x = F.adaptive_avg_pool2d(x, 1)
elif self.pool_type == 'maxpool':
x = F.adaptive_max_pool2d(x, 1)
x = x.view(x.size(0), x.size(1))
return x
def forward_multiframe(self, x, pool=True):
(B, C, T, H, W) = x.size()
x = x.permute(0, 2, 1, 3, 4).contiguous()
x = x.view(B*T, C, H, W)
x = self.features(x)
x = self.fc(x)
(_, C, H, W) = x.size()
x = x.view(B, T, C, H, W)
x = x.permute(0, 2, 1, 3, 4)
if not pool:
return x
if self.pool_type == 'avgpool':
x = F.adaptive_avg_pool3d(x, 1)
elif self.pool_type == 'maxpool':
x = F.adaptive_max_pool3d(x, 1)
x = x.view(B, C)
return x
class ResnetDilated(nn.Module):
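    # Replaces stride-2 convs in the late stages with dilated convs, so the output
    # stays at 1/dilate_scale of the input resolution while the receptive field is preserved.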
def __init__(self, orig_resnet, fc_dim=64, pool_type='maxpool',
dilate_scale=16, conv_size=3):
super(ResnetDilated, self).__init__()
from functools import partial
self.pool_type = pool_type
if dilate_scale == 8:
orig_resnet.layer3.apply(
partial(self._nostride_dilate, dilate=2))
orig_resnet.layer4.apply(
partial(self._nostride_dilate, dilate=4))
elif dilate_scale == 16:
orig_resnet.layer4.apply(
partial(self._nostride_dilate, dilate=2))
self.features = nn.Sequential(
*list(orig_resnet.children())[:-2])
self.fc = nn.Conv2d(
512, fc_dim, kernel_size=conv_size, padding=conv_size//2)
def _nostride_dilate(self, m, dilate):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
# the convolution with stride
if m.stride == (2, 2):
m.stride = (1, 1)
if m.kernel_size == (3, 3):
m.dilation = (dilate//2, dilate//2)
m.padding = (dilate//2, dilate//2)
            # other convolutions
else:
if m.kernel_size == (3, 3):
m.dilation = (dilate, dilate)
m.padding = (dilate, dilate)
def forward(self, x, pool=True):
x = self.features(x)
x = self.fc(x)
if not pool:
return x
if self.pool_type == 'avgpool':
x = F.adaptive_avg_pool2d(x, 1)
elif self.pool_type == 'maxpool':
x = F.adaptive_max_pool2d(x, 1)
x = x.view(x.size(0), x.size(1))
return x
def forward_multiframe(self, x, pool=True):
(B, C, T, H, W) = x.size()
x = x.permute(0, 2, 1, 3, 4).contiguous()
x = x.view(B*T, C, H, W)
x = self.features(x)
x = self.fc(x)
(_, C, H, W) = x.size()
x = x.view(B, T, C, H, W)
x = x.permute(0, 2, 1, 3, 4)
if not pool:
return x
if self.pool_type == 'avgpool':
x = F.adaptive_avg_pool3d(x, 1)
elif self.pool_type == 'maxpool':
x = F.adaptive_max_pool3d(x, 1)
x = x.view(B, C)
return x
| 4,152 | 27.445205 | 69 | py |
Simplified_DMC | Simplified_DMC-master/model/base_model_v1.py | import torch
import torch.nn as nn
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, modal, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.modal = modal
self.groups = groups
self.base_width = width_per_group
self.conv1_a = nn.Conv2d(1, self.inplanes, kernel_size=3, stride=2, padding=3,
bias=False)
self.conv1_v = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=2)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
if self.modal == 'audio':
x = self.conv1_a(x)
else:
x = self.conv1_v(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def forward(self, x):
return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, modal, **kwargs):
    # note: 'pretrained' and 'progress' are accepted for API compatibility with
    # torchvision, but this simplified variant never downloads or loads weights
    model = ResNet(block, layers, modal, **kwargs)
    return model
def resnet18(pretrained=False, progress=True, modal='vision',**kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): kept for API compatibility; this variant does not load ImageNet weights
        progress (bool): kept for API compatibility; no download is performed
    """
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, modal, **kwargs)
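# A minimal sketch of building the two modality-specific backbones; the input
# sizes are illustrative assumptions, and forward() returns layer4 feature
# maps rather than class logits.
if __name__ == '__main__':
    audio_net = resnet18(modal='audio')    # conv1_a expects 1-channel spectrograms
    vision_net = resnet18(modal='vision')  # conv1_v expects 3-channel images
    spec = torch.randn(2, 1, 257, 200)     # (B, 1, freq, time)
    img = torch.randn(2, 3, 224, 224)
    print(audio_net(spec).shape, vision_net(img).shape)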
| 9,008 | 38.169565 | 106 | py |
Simplified_DMC | Simplified_DMC-master/model/dmc_model.py | import torch
import torch.nn as nn
import random
class Cluster_layer(nn.Module):
def __init__(self, input_dim = 512, num_cluster=2, iters=4, beta=-30, **kwargs):
super(Cluster_layer, self).__init__()
self.input_dim = input_dim
self.num_cluster = num_cluster
self.iters = iters
self.beta = beta
self.epsilon = torch.tensor(1e-10).type(torch.FloatTensor)#.cuda()
def forward(self, u_vecs):
(batch_size, input_num, feature_dim) = u_vecs.size()
        ini_interval = int(input_num / self.num_cluster)  # spacing used to seed the initial cluster centers
o = torch.unsqueeze(u_vecs[:, 0, :], dim=1)
count = 1
while(self.num_cluster-count > 0):
current_o = torch.unsqueeze(u_vecs[:, ini_interval*count, :], dim=1) #ini_interval*count
o = torch.cat([o, current_o], dim=1)
count += 1
for i in range(self.iters):
nx = torch.sum(o**2, dim=2, keepdim=True)
ny = torch.sum(u_vecs**2, dim=2, keepdim=True)
qq = nx - 2 * torch.bmm(o, u_vecs.permute(0,2,1)) + ny.permute(0,2,1)
b = torch.sqrt(torch.max(qq, self.epsilon))
c = nn.functional.softmax(self.beta*b, dim=1) # assignments [None, output_num_capsule, input_num_capsule]
o = torch.bmm(c, u_vecs) # cluster centers [None, num_cluster, dim_cluster]
weights = torch.sum(c, dim=2, keepdim=True)
o = o / weights
return o, c
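# A minimal usage sketch (commented out; shapes are illustrative): the layer
# performs a soft, distance-based clustering of N input vectors and returns
# the cluster centers together with the soft assignment matrix.
#
# layer = Cluster_layer(input_dim=512, num_cluster=2)
# feats = torch.randn(4, 49, 512)   # (B, N, D), e.g. a flattened 7x7 feature map
# centers, assign = layer(feats)    # centers: (4, 2, 512), assign: (4, 2, 49)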
class DMC_NET(nn.Module):
def __init__(self, visual_net, audio_net, v_cluster_num = 4, a_cluster_num = 2):
super(DMC_NET, self).__init__()
# backbone net
self.visual_net = visual_net
self.audio_net = audio_net
self.pooling = nn.AdaptiveAvgPool2d((1, 1))
# visual ops
self.fc_v_1 = nn.Linear(512, 512)
self.fc_v_2 = nn.Linear(128, 128)
# audio ops
self.pooling_a = nn.AdaptiveMaxPool2d((1, 1))
self.fc_a_1 = nn.Linear(512, 512)
self.fc_a_2 = nn.Linear(128, 128)
self.relu = nn.ReLU(inplace=True)
# fusion ops
self.fc_av = nn.Linear(1, 2)
self.v_clustering = Cluster_layer(num_cluster=v_cluster_num)
self.a_clustering = Cluster_layer(num_cluster=a_cluster_num)
self.epsilon = torch.tensor(1e-10).type(torch.FloatTensor)#.cuda()
def forward(self, v_input, a_input):
# visual pathway
v_fea = self.visual_net(v_input)
(B, C, H, W) = v_fea.size()
v_fea = v_fea.view(B, C, H*W)
v_fea = v_fea.permute(0,2,1)
v_fea = self.fc_v_1(v_fea)
v_centers, v_assign = self.v_clustering(v_fea)
# audio pathway
a_fea = self.audio_net(a_input)
(B, C, H, W) = a_fea.size()
a_fea = a_fea.view(B, C, H*W)
a_fea = a_fea.permute(0,2,1)
a_fea = self.fc_a_1(a_fea)
a_centers, a_assign = self.a_clustering(a_fea)
v_centers_ = torch.sum(v_centers ** 2, dim=2, keepdim=True)
a_centers_ = torch.sum(a_centers ** 2, dim=2, keepdim=True)
distance_ = torch.sqrt(torch.max(v_centers_ - 2 * torch.bmm(v_centers, a_centers.permute(0, 2, 1)) + a_centers_.permute(0, 2, 1), self.epsilon))
distance = torch.min(distance_, dim=1)
distance = distance.values
return distance, v_assign, distance_ | 3,338 | 35.293478 | 152 | py |
synfeal | synfeal-main/utils.py | import numpy as np
import os
import cv2
import torch
import math
import yaml
from sklearn.metrics import mean_squared_error
from torchsummary import summary
from yaml.loader import SafeLoader
from colorama import Fore
from scipy.spatial.transform import Rotation as R
from models.loss_functions import BetaLoss, DynamicLoss
from models.posenet import PoseNetGoogleNet, PoseNetResNet
from models.poselstm import PoseLSTM
from models.hourglass import HourglassBatch
from synfeal_collection.src.pypcd_no_ros import PointCloud
def write_pcd(filename, msg, mode='binary'):
pc = PointCloud.from_msg(msg)
pc.save_pcd(filename, compression=mode)
def read_pcd(filename):
if not os.path.isfile(filename):
raise Exception("[read_pcd] File does not exist.")
pc = PointCloud.from_path(filename)
return pc
def write_transformation(filename, transformation):
np.savetxt(filename, transformation, delimiter=',',fmt='%.5f')
def write_img(filename, img):
cv2.imwrite(filename, img)
def matrixToRodrigues(matrix):
rods, _ = cv2.Rodrigues(matrix[0:3, 0:3])
rods = rods.transpose()
rodrigues = rods[0]
return rodrigues
def matrixToQuaternion(matrix):
rot_matrix = matrix[0:3, 0:3]
r = R.from_matrix(rot_matrix)
return r.as_quat()
def matrixToXYZ(matrix):
return matrix[0:3,3]
def rodriguesToMatrix(r):
    rod = np.array(r, dtype=np.float64)  # np.float was removed in NumPy >= 1.24
matrix = cv2.Rodrigues(rod)
return matrix[0]
def quaternionToMatrix(quat):
return R.from_quat(quat).as_matrix()
def poseToMatrix(pose):
matrix = np.zeros((4,4))
rot_mat = quaternionToMatrix(pose[3:])
trans = pose[:3]
matrix[0:3,0:3] = rot_mat
matrix[0:3,3] = trans
matrix[3,3] = 1
return matrix
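# A minimal round-trip sketch (commented out; the values are illustrative):
# a pose here is [x, y, z, qx, qy, qz, qw], matching poseToMatrix and
# matrix44_to_pose below.
#
# pose = np.array([1.0, 2.0, 0.5, 0.0, 0.0, 0.0, 1.0])
# T = poseToMatrix(pose)                        # 4x4 homogeneous transform
# assert np.allclose(matrix44_to_pose(T), pose)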
def write_intrinsic(filename, data):
matrix = np.zeros((3,3))
matrix[0,0] = data[0]
matrix[0,1] = data[1]
matrix[0,2] = data[2]
matrix[1,0] = data[3]
matrix[1,1] = data[4]
matrix[1,2] = data[5]
matrix[2,0] = data[6]
matrix[2,1] = data[7]
matrix[2,2] = data[8]
np.savetxt(filename, matrix, delimiter=',',fmt='%.5f')
def rotationAndpositionToMatrix44(rotation, position):
matrix44 = np.empty(shape=(4,4))
matrix44[:3,:3] = rotation
matrix44[:3,3] = position
matrix44[3,:3] = 0
matrix44[3,3] = 1
return matrix44
def matrix44_to_pose(matrix44):
quaternion = matrixToQuaternion(matrix44)
quaternion = normalize_quat(quaternion)
xyz = matrixToXYZ(matrix44)
pose = np.append(xyz, quaternion)
return pose
def compute_position_error(pred, targ):
pred = pred[:3]
targ = targ[:3]
return mean_squared_error(pred, targ, squared=False) # RMSE
def compute_rotation_error(pred, targ):
    # using rodrigues (like ATOM) --> better because the angle ranges from 0 to pi (whereas with quaternions it ranges from 0 to 2pi)
pred_matrix = poseToMatrix(pred)
targ_matrix = poseToMatrix(targ)
delta = np.dot(np.linalg.inv(pred_matrix), targ_matrix)
deltaR = matrixToRodrigues(delta[0:3, 0:3])
return np.linalg.norm(deltaR)
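# A minimal sketch (commented out): the position error is the RMSE over x, y, z
# in meters; the rotation error is the angle (rad) of the residual rotation.
#
# pred = np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0])
# targ = np.array([1.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0])
# print(compute_position_error(pred, targ))    # ~0.0577
# print(compute_rotation_error(pred, targ))    # 0.0 (same orientation)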
def normalize_quat(x, p=2, dim=1):
"""
Divides a tensor along a certain dim by the Lp norm
:param x:
:param p: Lp norm
:param dim: Dimension to normalize along
:return:
"""
if torch.is_tensor(x):
# x.shape = (N,4)
xn = x.norm(p=p, dim=dim) # computes the norm: 1xN
x = x / xn.unsqueeze(dim=dim)
else: # numpy
xn = np.linalg.norm(x)
x = x/xn
return x
def summarizeModel(model, input_example):
model.cuda()
summary(model, input_size=input_example.shape)
model.cpu()
def resumeTraining(folder_name):
model_name = [f for f in os.listdir(folder_name) if f.endswith('.pth')][0] # get first in the list of files that have extension .pth
file_name = f'{folder_name}/config.yaml'
with open(file_name) as f:
config = yaml.load(f, Loader=SafeLoader)
model = eval(config['init_model'])
model.load_state_dict(torch.load(f'{folder_name}/{model_name}'))
start_epoch = config['epoch']
train_losses = config['train_losses']
test_losses = config['test_losses']
print(f'{Fore.BLUE} Resuming training of model from epoch: {start_epoch} {Fore.RESET}')
return start_epoch, train_losses, test_losses, model
def process_pose(pose):
quat_unit = normalize_quat(pose[:,3:])
return torch.cat((pose[:,:3], quat_unit), dim=1)
def projectToCamera(intrinsic_matrix, distortion, width, height, pts):
"""
Projects a list of points to the camera defined transform, intrinsics and distortion
:param intrinsic_matrix: 3x3 intrinsic camera matrix
:param distortion: should be as follows: (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]])
:param width: the image width
:param height: the image height
:param pts: a list of point coordinates (in the camera frame) with the following format: np array 4xn or 3xn
:return: a list of pixel coordinates with the same length as pts
"""
_, n_pts = pts.shape
# Project the 3D points in the camera's frame to image pixels
# From https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
    pixs = np.zeros((2, n_pts), dtype=np.float64)  # np.float was removed in NumPy >= 1.24
k1, k2, p1, p2, k3 = distortion
# fx, _, cx, _, fy, cy, _, _, _ = intrinsic_matrix
# print('intrinsic=\n' + str(intrinsic_matrix))
fx = intrinsic_matrix[0, 0]
fy = intrinsic_matrix[1, 1]
cx = intrinsic_matrix[0, 2]
cy = intrinsic_matrix[1, 2]
x = pts[0, :]
y = pts[1, :]
z = pts[2, :]
dists = np.linalg.norm(pts[0:3, :], axis=0) # compute distances from point to camera
xl = np.divide(x, z) # compute homogeneous coordinates
yl = np.divide(y, z) # compute homogeneous coordinates
    r2 = xl ** 2 + yl ** 2  # r squared (used multiple times below)
xll = xl * (1 + k1 * r2 + k2 * r2 ** 2 + k3 * r2 ** 3) + 2 * p1 * xl * yl + p2 * (r2 + 2 * xl ** 2)
yll = yl * (1 + k1 * r2 + k2 * r2 ** 2 + k3 * r2 ** 3) + p1 * (r2 + 2 * yl ** 2) + 2 * p2 * xl * yl
pixs[0, :] = fx * xll + cx
pixs[1, :] = fy * yll + cy
# Compute mask of valid projections
valid_z = z > 0
valid_xpix = np.logical_and(pixs[0, :] >= 0, pixs[0, :] < width)
valid_ypix = np.logical_and(pixs[1, :] >= 0, pixs[1, :] < height)
valid_pixs = np.logical_and(valid_z, np.logical_and(valid_xpix, valid_ypix))
return pixs, valid_pixs, dists
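# A minimal sketch (commented out; the intrinsics are illustrative): project
# two points expressed in the camera frame onto a 640x480 image with zero
# distortion; the first point lands on the principal point (320, 240).
#
# K = np.array([[500.0, 0.0, 320.0],
#               [0.0, 500.0, 240.0],
#               [0.0, 0.0, 1.0]])
# pts = np.array([[0.0, 0.5], [0.0, 0.0], [2.0, 2.0]])   # 3xN, z > 0
# pixs, valid, dists = projectToCamera(K, (0, 0, 0, 0, 0), 640, 480, pts)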
def synthesize_pose(pose1, pose2):
"""
synthesize pose between pose1 and pose2
pose1: 4x4
pose2: 4x4
"""
pos1 = pose1[:3,3]
rot1 = pose1[:3,:3]
pos2 = pose2[:3,3]
rot2 = pose2[:3,:3]
# rot3x3 to euler angles
rot1_euler = R.from_matrix(rot1).as_euler('xyz', degrees=False)
rot2_euler = R.from_matrix(rot2).as_euler('xyz', degrees=False)
pos3 = (pos1 + pos2) / 2
rot3_euler = (rot1_euler + rot2_euler) / 2
rot3 = R.from_euler('xyz', rot3_euler, degrees=False).as_matrix()
pose3 = np.zeros(shape=(4,4))
pose3[:3,:3] = rot3
pose3[:3,3] = pos3
pose3[-1,-1] = 1
return pose3
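# A minimal sketch (commented out): the midpoint pose averages the positions
# and (naively) the xyz Euler angles, so it is only a fair approximation for
# nearby orientations away from angle wrap-around.
#
# T1 = np.eye(4)
# T2 = np.eye(4); T2[:3, 3] = [2.0, 0.0, 0.0]
# T_mid = synthesize_pose(T1, T2)   # translation becomes [1, 0, 0]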
def applyNoise(matrix44, pos_error, rot_error):
xyz = matrixToXYZ(matrix44)
euler = R.from_quat(matrixToQuaternion(matrix44)).as_euler('xyz', 'degrees')
# adapted from ATOM
v = np.random.uniform(-1.0, 1.0, 3)
v = v / np.linalg.norm(v)
new_xyz = xyz + v * (pos_error*math.sqrt(3))
v = np.random.choice([-1.0, 1.0], 3) * (rot_error/math.sqrt(3))
new_euler = euler + v
rotation_angles = R.from_euler('xyz', new_euler, degrees=True).as_matrix()
new_matrix44 = rotationAndpositionToMatrix44(rotation=rotation_angles, position=new_xyz)
return new_matrix44 | 7,993 | 29.51145 | 146 | py |
synfeal | synfeal-main/dataset.py | import cv2
import torch.utils.data as data
import numpy as np
import torch
import os
import yaml
from PIL import Image
from yaml.loader import SafeLoader
from utils import read_pcd, matrixToXYZ, matrixToQuaternion, normalize_quat
# pytorch datasets: https://pytorch.org/tutorials/beginner/basics/data_tutorial.html
class Dataset(data.Dataset):
def __init__(self, path_seq, rgb_transform = None, depth_transform = None, inputs = None):
self.root = f'{os.environ.get("SYNFEAL_DATASET")}/datasets/localbot'
self.seq = path_seq
self.path_seq = f'{self.root}/{path_seq}'
self.rgb_transform = rgb_transform
self.depth_transform = depth_transform
        if inputs is None:
self.inputs = ['point_cloud', 'depth_image', 'rgb_image']
else:
self.inputs = inputs
config = self.getConfig()
if 'statistics' in config:
self.depth_mean = config['statistics']['D']['mean']
self.depth_std = config['statistics']['D']['std']
def __getitem__(self, index):
output = []
if 'point_cloud' in self.inputs:
# load point cloud
pc_raw = read_pcd(f'{self.path_seq}/frame-{index:05d}.pcd')
point_set = np.vstack([pc_raw.pc_data['x'], pc_raw.pc_data['y'], pc_raw.pc_data['z']]).T # stays NX3
point_set = torch.from_numpy(point_set.astype(np.float32))
output.append(point_set)
if 'depth_image' in self.inputs:
# load depth image
depth_image = cv2.imread(f'{self.path_seq}/frame-{index:05d}.depth.png', cv2.IMREAD_UNCHANGED)
depth_image = depth_image.astype(np.float32) / 1000.0 # to meters
depth_image = Image.fromarray(depth_image)
            if self.depth_transform is not None:
depth_image = self.depth_transform(depth_image)
output.append(depth_image)
if 'rgb_image' in self.inputs:
# TODO: change this to the correct dataset
rgb_image = Image.open(f'{self.path_seq}/frame-{index:05d}.rgb.png')
            if self.rgb_transform is not None:
rgb_image = self.rgb_transform(rgb_image)
output.append(rgb_image)
# load pose
matrix = np.loadtxt(f'{self.path_seq}/frame-{index:05d}.pose.txt', delimiter=',')
quaternion = matrixToQuaternion(matrix)
quaternion = normalize_quat(quaternion)
xyz = matrixToXYZ(matrix)
pose = np.append(xyz, quaternion)
pose = torch.from_numpy(pose.astype(np.float32))
output.append(pose)
return tuple(output)
def __len__(self):
return sum(f.endswith('pose.txt') for f in os.listdir(self.path_seq))
def getConfig(self):
with open(f'{self.path_seq}/config.yaml') as f:
config = yaml.load(f, Loader=SafeLoader)
return config
def setConfig(self, config):
with open(f'{self.path_seq}/config.yaml', 'w') as f:
yaml.dump(config, f)
# example usage (commented out; requires `from torchvision import transforms`;
# the Compose variable names below are reconstructed from the Dataset call):
# config_stats = Dataset('seq5', depth_transform=None, rgb_transform=None, inputs=['depth_image']).getConfig()['statistics']
# rgb_mean = [config_stats['R']['mean'], config_stats['G']['mean'], config_stats['B']['mean']]
# rgb_std = [config_stats['R']['std'], config_stats['G']['std'], config_stats['B']['std']]
# depth_mean = config_stats['D']['mean']
# depth_std = config_stats['D']['std']
# print(depth_mean)
# depth_transform_train = transforms.Compose([
#     transforms.Resize(300),
#     transforms.CenterCrop(299),
#     transforms.ToTensor(),
#     transforms.Normalize(mean=(depth_mean,), std=(depth_std,))
# ])
# rgb_transform_train = transforms.Compose([
#     transforms.Resize(300),
#     transforms.RandomCrop(299),
#     transforms.ToTensor(),
#     transforms.Normalize(rgb_mean, rgb_std)
# ])
# rgb_transform_test = transforms.Compose([
#     transforms.Resize(300),
#     transforms.CenterCrop(299),
#     transforms.ToTensor(),
#     transforms.Normalize(rgb_mean, rgb_std)
# ])
# dataset = Dataset('seq6', depth_transform=depth_transform_train, rgb_transform=rgb_transform_train, inputs=['depth_image', 'rgb_image'])
# for i in range(100,110):
# print(f'depth size: {dataset[i][0].shape}')
# print(f'rgb size: {dataset[i][1].shape}')
# print(f'depth mean: {np.mean(dataset[i][0].numpy())}')
# print(f'rgb mean: {np.mean(dataset[i][1].numpy())}')
| 4,547 | 34.53125 | 137 | py |
synfeal | synfeal-main/utils_ros.py | import copy
import math
import tf
import rospy
import os
from geometry_msgs.msg import Pose, Point
from visualization_msgs.msg import *
from std_msgs.msg import Header, ColorRGBA
from synfeal_collection.src.pypcd import PointCloud
def write_pcd(filename, msg, mode='binary'):
pc = PointCloud.from_msg(msg)
pc.save_pcd(filename, compression=mode)
def read_pcd(filename):
if not os.path.isfile(filename):
raise Exception("[read_pcd] File does not exist.")
pc = PointCloud.from_path(filename)
return pc
def data2pose(data):
    if isinstance(data, str):
        # a "x,y,z,rx,ry,rz" string: split on commas and cast to float
        # (list(data) would split it into single characters and break multi-digit values)
        data = [float(value) for value in data.split(',')]
    lst_data = list(data)
data = {'x' : lst_data[0],
'y' : lst_data[1],
'z' : lst_data[2],
'rx' : lst_data[3],
'ry' : lst_data[4],
'rz' : lst_data[5]}
quaternion = tf.transformations.quaternion_from_euler(data['rx'], data['ry'], data['rz'])
#quaternion = R.from_euler('xyz',[[data['rx'], data['ry'], data['rz']]], degrees=False).as_quat()
p = Pose()
p.position.x = data['x']
p.position.y = data['y']
p.position.z = data['z']
p.orientation.x = quaternion[0]
p.orientation.y = quaternion[1]
p.orientation.z = quaternion[2]
p.orientation.w = quaternion[3]
return p
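# A minimal usage sketch (commented out; requires a ROS environment for tf):
#
# p = data2pose('1.0,2.0,0.5,0.0,0.0,1.57')        # "x,y,z,rx,ry,rz" string
# p = data2pose([1.0, 2.0, 0.5, 0.0, 0.0, 1.57])   # or an already-parsed sequence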
def createArrowMarker(pose, color):
pose_marker = copy.deepcopy(pose)
matrix_quaternion_marker = pose_marker[3:]
#matrix_quaternion_marker = R.from_quat(pose_marker[3:]).as_matrix()
# rotate_y90 = R.from_euler('y', -90, degrees=True).as_matrix()
# matrix_quaternion_marker = np.dot(
# matrix_quaternion_marker, rotate_y90)
# quaternion_marker = R.from_matrix(
# matrix_quaternion_marker).as_quat()
marker = Marker(header=Header(
frame_id="world", stamp=rospy.Time.now()))
marker.type = marker.ARROW
marker.action = marker.ADD
marker.scale.x = 0.3
marker.scale.y = 0.05
marker.scale.z = 0.05
marker.color.a = color[-1]
marker.color.r = color[0]
marker.color.g = color[1]
marker.color.b = color[2]
marker.pose.orientation.x = matrix_quaternion_marker[0]
marker.pose.orientation.y = matrix_quaternion_marker[1]
marker.pose.orientation.z = matrix_quaternion_marker[2]
marker.pose.orientation.w = matrix_quaternion_marker[3]
marker.pose.position.x = pose[0]
marker.pose.position.y = pose[1]
marker.pose.position.z = pose[2]
marker.ns = 'final_pose'
marker.id = 1
return marker
def getFrustumMarkerArray(w, h, f_x, f_y, Z_near, Z_far, frame_id, ns, color, alpha=0.9, thickness=0.005, lifetime=False):
marker_array = MarkerArray()
# Define view frustum points
fov_x = 2 * math.atan2(w, (2 * f_x))
fov_y = 2 * math.atan2(h, (2 * f_y))
x_n = math.tan(fov_x / 2) * Z_near
y_n = math.tan(fov_y / 2) * Z_near
x_f = math.tan(fov_x / 2) * Z_far
y_f = math.tan(fov_y / 2) * Z_far
points = [Point(-x_n, y_n, Z_near),
Point(x_n, y_n, Z_near),
Point(x_n, -y_n, Z_near),
Point(-x_n, -y_n, Z_near),
Point(-x_f, y_f, Z_far),
Point(x_f, y_f, Z_far),
Point(x_f, -y_f, Z_far),
Point(-x_f, -y_f, Z_far)]
# Define wireframe
color_rviz = ColorRGBA(r=color[0]/2, g=color[1]/2, b=color[2]/2, a=1.0)
marker = Marker(ns=ns+'_wireframe', type=Marker.LINE_LIST, action=Marker.ADD, header=Header(frame_id=frame_id),
color=color_rviz)
if lifetime:
marker.lifetime=rospy.Duration(0)
marker.scale.x = thickness # line width
marker.pose.orientation.w = 1.0
    # marker line points: near-plane edges, far-plane edges, and the four
    # edges connecting the two planes
    edges = [(0, 1), (1, 2), (2, 3), (3, 0),
             (0, 4), (1, 5), (2, 6), (3, 7),
             (4, 5), (5, 6), (6, 7), (7, 4)]
    for start, end in edges:
        marker.points.append(points[start])
        marker.points.append(points[end])
marker_array.markers.append(copy.deepcopy(marker))
# Define filled
color_rviz = ColorRGBA(r=color[0], g=color[1], b=color[2], a=alpha)
marker = Marker(ns=ns+'_filled', type=Marker.TRIANGLE_LIST, action=Marker.ADD, header=Header(frame_id=frame_id),
color=color_rviz)
if lifetime:
marker.lifetime=rospy.Duration(0)
marker.scale.x = 1 # line width
marker.scale.y = 1 # line width
marker.scale.z = 1 # line width
marker.pose.orientation.w = 1.0
    # marker triangles of the lateral faces of the frustum pyramid
    triangles = [(1, 2, 6), (1, 6, 5), (0, 4, 3), (3, 4, 7),
                 (0, 1, 5), (0, 4, 5), (3, 2, 6), (3, 6, 7)]
    for triangle in triangles:
        for idx in triangle:
            marker.points.append(points[idx])
            marker.colors.append(color_rviz)
marker_array.markers.append(copy.deepcopy(marker))
return marker_array
| 7,402 | 30.105042 | 122 | py |
synfeal | synfeal-main/deprecated/raycast_example.py |
# stdlib
import argparse
# 3rd-party
import trimesh
import numpy as np
import time
def main():
# parser = argparse.ArgumentParser(description='Data Collector')
# parser.add_argument('-m', '--mode', type=str, default='interactive',
# help='interactive/automatic_random_path/automatic_path')
#mesh = trimesh.creation.icosphere()
#mesh = trimesh.exchange.dae.load_collada('/home/danc/models_3d/santuario_collision/Virtudes_Chapel.dae')
start_time = time.time()
mesh = trimesh.load('/home/danc/models_3d/santuario_collision/Virtudes_Chapel.dae', force='mesh')
p1 = np.array([0,0,0])
p2 = np.array([4,0,0])
dp1p2 = np.linalg.norm(p2-p1)
ori = p2 - p1
norm_ori = np.linalg.norm(ori)
ori = ori / norm_ori
# create some rays
#ray_origins = np.array([[0, 0, 0]])
#ray_directions = np.array([[0, 1, 0]])
ray_origins = np.array([p1])
ray_directions = np.array([ori])
# check out the docstring for intersects_location queries
print(mesh.ray.intersects_location.__doc__)
locations, index_ray, index_tri = mesh.ray.intersects_location(
ray_origins=ray_origins,
ray_directions=ray_directions)
print('The rays hit the mesh at coordinates:\n', locations)
print(time.time() - start_time)
    # distance from each intersection point to the ray origin p1
    dists_to_p1 = []
    for location in locations:
        dists_to_p1.append(np.linalg.norm(location - p1))
    print(dists_to_p1)
closest_collision = min(dists_to_p1)
print(closest_collision)
print(dp1p2)
    # the segment p1->p2 is blocked if the nearest hit lies closer than p2
    if closest_collision < dp1p2:
        print('COLLISION')
    else:
        print('SAFE')
if __name__ == "__main__":
main()
| 1,910 | 24.144737 | 109 | py |
synfeal | synfeal-main/deprecated/rotation_to_direction.py |
from scipy.spatial.transform import Rotation as R
#rotate_y90 = R.from_euler('y', 90, degrees=True).as_matrix()
rot_quat = R.from_euler('x', 40, degrees=True).as_quat()  # a 40 deg rotation about x (the old name 'rotate_y90' was misleading)
matrix = R.from_quat(rot_quat).as_matrix()
print(matrix)
print(matrix[:,0]) | 274 | 26.5 | 61 | py |
synfeal | synfeal-main/synfeal_collection/src/automatic_data_collection.py |
# stdlib
import random
import os
# 3rd-party
import rospy
import tf
import numpy as np
import trimesh
from geometry_msgs.msg import Pose
#from interactive_markers.interactive_marker_server import *
#from interactive_markers.menu_handler import *
from visualization_msgs.msg import *
from gazebo_msgs.srv import SetModelState, GetModelState, SetModelStateRequest
from colorama import Fore
from scipy.spatial.transform import Rotation as R
from synfeal_collection.src.save_dataset import SaveDataset
from utils import *
from utils_ros import *
class AutomaticDataCollection():
def __init__(self, model_name, seq, dbf=None, uvl=None, model3d_config=None, fast=None, save_dataset=True, mode=None):
self.set_state_service = rospy.ServiceProxy(
'/gazebo/set_model_state', SetModelState)
self.model_name = model_name # model_name = 'localbot'
self.dbf = dbf
rospy.wait_for_service('/gazebo/get_model_state')
self.get_model_state_service = rospy.ServiceProxy(
'/gazebo/get_model_state', GetModelState)
# create instance to save dataset
if save_dataset:
self.save_dataset = SaveDataset(
f'{seq}', mode=mode, dbf=dbf, uvl=uvl, model3d_config=model3d_config, fast=fast)
name_model3d_config = model3d_config['name'].split('.')[0]
print(name_model3d_config)
# define minimum and maximum boundaries
self.x_min = model3d_config['volume']['position']['xmin']
self.x_max = model3d_config['volume']['position']['xmax']
self.y_min = model3d_config['volume']['position']['ymin']
self.y_max = model3d_config['volume']['position']['ymax']
self.z_min = model3d_config['volume']['position']['zmin']
self.z_max = model3d_config['volume']['position']['zmax']
self.rx_min = model3d_config['volume']['angles']['rxmin']
self.rx_max = model3d_config['volume']['angles']['rxmax']
self.ry_min = model3d_config['volume']['angles']['rymin']
self.ry_max = model3d_config['volume']['angles']['rymax']
self.rz_min = model3d_config['volume']['angles']['rzmin']
self.rz_max = model3d_config['volume']['angles']['rzmax']
# define minimum and maximum light
self.att_min = model3d_config['light']['att_min']
self.att_max = model3d_config['light']['att_max']
self.att_initial = model3d_config['light']['att_initial']
self.light_names = model3d_config['light']['light_names']
self.use_collision = model3d_config['collision']['use']
self.min_cam_dist = model3d_config['collision']['min_camera_distance']
if self.use_collision:
path=os.environ.get("SYNFEAL_DATASET")
self.mesh_collision = trimesh.load(
f'{path}/models_3d/localbot/{name_model3d_config}/{name_model3d_config}_collision.dae', force='mesh')
else:
self.mesh_collision = False
# set initial pose
print('setting initial pose...')
x = model3d_config['initial_pose']['x']
y = model3d_config['initial_pose']['y']
z = model3d_config['initial_pose']['z']
rx = model3d_config['initial_pose']['rx']
ry = model3d_config['initial_pose']['ry']
rz = model3d_config['initial_pose']['rz']
quaternion = tf.transformations.quaternion_from_euler(rx, ry, rz)
p = Pose()
p.position.x = x
p.position.y = y
p.position.z = z
p.orientation.x = quaternion[0]
p.orientation.y = quaternion[1]
p.orientation.z = quaternion[2]
p.orientation.w = quaternion[3]
self.setPose(p)
rospy.sleep(1)
def generateRandomPose(self):
x = random.uniform(self.x_min, self.x_max)
y = random.uniform(self.y_min, self.y_max)
z = random.uniform(self.z_min, self.z_max)
rx = random.uniform(self.rx_min, self.rx_max)
ry = random.uniform(self.ry_min, self.ry_max)
rz = random.uniform(self.rz_min, self.rz_max)
quaternion = tf.transformations.quaternion_from_euler(rx, ry, rz)
p = Pose()
p.position.x = x
p.position.y = y
p.position.z = z
p.orientation.x = quaternion[0]
p.orientation.y = quaternion[1]
p.orientation.z = quaternion[2]
p.orientation.w = quaternion[3]
return p
def generatePath(self, final_pose=None):
initial_pose = self.getPose().pose
if final_pose == None:
final_pose = self.generateRandomPose()
while True:
xyz_initial = np.array(
[initial_pose.position.x, initial_pose.position.y, initial_pose.position.z])
xyz_final = np.array(
[final_pose.position.x, final_pose.position.y, final_pose.position.z])
l2_dst = np.linalg.norm(xyz_final - xyz_initial)
# if final pose is close to the initial or there is collision, choose another final pose
if l2_dst < 1.5 or self.checkCollision(initial_pose=initial_pose, final_pose=final_pose):
final_pose = self.generateRandomPose()
else:
break
# compute n_steps based on l2_dist
n_steps = int(l2_dst / self.dbf)
print('using n_steps of: ', n_steps)
step_poses = [] # list of tuples
rx, ry, rz = tf.transformations.euler_from_quaternion(
[initial_pose.orientation.x, initial_pose.orientation.y, initial_pose.orientation.z, initial_pose.orientation.w])
pose_initial_dct = {'x': initial_pose.position.x,
'y': initial_pose.position.y,
'z': initial_pose.position.z,
'rx': rx,
'ry': ry,
'rz': rz}
rx, ry, rz = tf.transformations.euler_from_quaternion(
[final_pose.orientation.x, final_pose.orientation.y, final_pose.orientation.z, final_pose.orientation.w])
pose_final_dct = {'x': final_pose.position.x,
'y': final_pose.position.y,
'z': final_pose.position.z,
'rx': rx,
'ry': ry,
'rz': rz}
x_step_var = (pose_final_dct['x'] - pose_initial_dct['x']) / n_steps
y_step_var = (pose_final_dct['y'] - pose_initial_dct['y']) / n_steps
z_step_var = (pose_final_dct['z'] - pose_initial_dct['z']) / n_steps
rx_step_var = (pose_final_dct['rx'] - pose_initial_dct['rx']) / n_steps
ry_step_var = (pose_final_dct['ry'] - pose_initial_dct['ry']) / n_steps
rz_step_var = (pose_final_dct['rz'] - pose_initial_dct['rz']) / n_steps
for i in range(n_steps):
dct = {'x': pose_initial_dct['x'] + (i + 1) * x_step_var,
'y': pose_initial_dct['y'] + (i + 1) * y_step_var,
'z': pose_initial_dct['z'] + (i + 1) * z_step_var,
'rx': pose_initial_dct['rx'] + (i + 1) * rx_step_var,
'ry': pose_initial_dct['ry'] + (i + 1) * ry_step_var,
'rz': pose_initial_dct['rz'] + (i + 1) * rz_step_var}
pose = data2pose(dct)
step_poses.append(pose)
return step_poses
def getPose(self):
return self.get_model_state_service(self.model_name, 'world')
def setPose(self, pose):
req = SetModelStateRequest() # Create an object of type SetModelStateRequest
req.model_state.model_name = self.model_name
req.model_state.pose.position.x = pose.position.x
req.model_state.pose.position.y = pose.position.y
req.model_state.pose.position.z = pose.position.z
req.model_state.pose.orientation.x = pose.orientation.x
req.model_state.pose.orientation.y = pose.orientation.y
req.model_state.pose.orientation.z = pose.orientation.z
req.model_state.pose.orientation.w = pose.orientation.w
req.model_state.reference_frame = 'world'
self.set_state_service(req.model_state)
    def generateLights(self, n_steps, random):  # note: the bool arg 'random' shadows the stdlib 'random' module here
lights = []
if random:
lights = [np.random.uniform(
low=self.att_min, high=self.att_max) for _ in range(n_steps)]
else:
initial_light = self.att_initial
final_light = np.random.uniform(
low=self.att_min, high=self.att_max)
step_light = (final_light - initial_light) / n_steps
for i in range(n_steps):
lights.append(initial_light + (i + 1) * step_light)
self.att_initial = final_light
return lights
def setLight(self, light):
for name in self.light_names:
my_str = f'name: "{name}" \nattenuation_quadratic: {light}'
with open('/tmp/set_light.txt', 'w') as f:
f.write(my_str)
os.system(
f'gz topic -p /gazebo/mercado_negro/light/modify -f /tmp/set_light.txt')
def checkCollision(self, initial_pose, final_pose):
if self.use_collision is False:
print('not using COLLISIONS.')
return False
p1_xyz = np.array(
[initial_pose.position.x, initial_pose.position.y, initial_pose.position.z])
p1_quat = np.array([initial_pose.orientation.x, initial_pose.orientation.y,
initial_pose.orientation.z, initial_pose.orientation.w])
p2_xyz = np.array(
[final_pose.position.x, final_pose.position.y, final_pose.position.z])
p2_quat = np.array([final_pose.orientation.x, final_pose.orientation.y,
final_pose.orientation.z, final_pose.orientation.w])
dist_p1_to_p2 = np.linalg.norm(p2_xyz-p1_xyz)
print(
f' {Fore.BLUE} Checking collision... {Fore.RESET} between {p1_xyz} and {p2_xyz}')
orientation = p2_xyz - p1_xyz
norm_orientation = np.linalg.norm(orientation)
orientation = orientation / norm_orientation
ray_origins = np.array([p1_xyz])
ray_directions = np.array([orientation])
collisions, _, _ = self.mesh_collision.ray.intersects_location(
ray_origins=ray_origins,
ray_directions=ray_directions)
closest_collision_to_p1 = self.getClosestCollision(collisions, p1_xyz)
# compare the closest collision with the position of p2
if closest_collision_to_p1 < dist_p1_to_p2:
# collision
print(f'{Fore.RED} Collision Detected. {Fore.RESET}')
return True
else:
# no collision
# check if p2 camera viewpoint if close to a obstacle.
orientation = R.from_quat(p2_quat).as_matrix()[:, 0]
norm_orientation = np.linalg.norm(orientation)
orientation = orientation / norm_orientation
ray_origins = np.array([p2_xyz])
ray_directions = np.array([orientation])
collisions, _, _ = self.mesh_collision.ray.intersects_location(
ray_origins=ray_origins,
ray_directions=ray_directions)
closest_collision_to_p2 = self.getClosestCollision(
collisions, p2_xyz)
if closest_collision_to_p2 < self.min_cam_dist:
print(
f'{Fore.YELLOW} Final Pose is too close to a obstacle. {Fore.RESET}')
return True
else:
print(f'{Fore.GREEN} NO Collision Detected {Fore.RESET}')
return False
def checkCollisionVis(self, initial_pose, final_pose):
if self.use_collision is False:
print('not using COLLISIONS.')
return False
# load mesh
# TODO #83 this should not be hardcoded
mesh = trimesh.load(
'/home/danc/models_3d/santuario_collision/Virtudes_Chapel.dae', force='mesh')
p1_xyz = np.array(
[initial_pose.position.x, initial_pose.position.y, initial_pose.position.z])
p1_quat = np.array([initial_pose.orientation.x, initial_pose.orientation.y,
initial_pose.orientation.z, initial_pose.orientation.w])
p2_xyz = np.array(
[final_pose.position.x, final_pose.position.y, final_pose.position.z])
p2_quat = np.array([final_pose.orientation.x, final_pose.orientation.y,
final_pose.orientation.z, final_pose.orientation.w])
dist_p1_to_p2 = np.linalg.norm(p2_xyz-p1_xyz)
print(
f' {Fore.BLUE} Checking collision... {Fore.RESET} between {p1_xyz} and {p2_xyz}')
orientation = p2_xyz - p1_xyz
norm_orientation = np.linalg.norm(orientation)
orientation = orientation / norm_orientation
ray_origins = np.array([p1_xyz])
ray_directions = np.array([orientation])
collisions, _, _ = mesh.ray.intersects_location(
ray_origins=ray_origins,
ray_directions=ray_directions)
closest_collision_to_p1 = self.getClosestCollision(collisions, p1_xyz)
# compare the closest collision with the position of p2
if closest_collision_to_p1 < dist_p1_to_p2:
# collision
print(f'{Fore.RED} Collision Detected. {Fore.RESET}')
return 1
else:
# no collision
# check if p2 camera viewpoint if close to a obstacle.
orientation = R.from_quat(p2_quat).as_matrix()[:, 0]
norm_orientation = np.linalg.norm(orientation)
orientation = orientation / norm_orientation
ray_origins = np.array([p2_xyz])
ray_directions = np.array([orientation])
collisions, _, _ = mesh.ray.intersects_location(
ray_origins=ray_origins,
ray_directions=ray_directions)
closest_collision_to_p2 = self.getClosestCollision(
collisions, p2_xyz)
if closest_collision_to_p2 < self.min_cam_dist:
print(
f'{Fore.YELLOW} Final Pose is too close to a obstacle. {Fore.RESET}')
return 0.5
else:
print(f'{Fore.GREEN} NO Collision Detected {Fore.RESET}')
return 0
def generatePathViz(self, final_pose):
initial_pose = self.getPose().pose
xyz_initial = np.array(
[initial_pose.position.x, initial_pose.position.y, initial_pose.position.z])
xyz_final = np.array(
[final_pose.position.x, final_pose.position.y, final_pose.position.z])
l2_dst = np.linalg.norm(xyz_final - xyz_initial)
# compute n_steps based on l2_dist
n_steps = int(l2_dst / self.dbf)
print('using n_steps of: ', n_steps)
step_poses = [] # list of tuples
rx, ry, rz = tf.transformations.euler_from_quaternion(
[initial_pose.orientation.x, initial_pose.orientation.y, initial_pose.orientation.z, initial_pose.orientation.w])
pose_initial_dct = {'x': initial_pose.position.x,
'y': initial_pose.position.y,
'z': initial_pose.position.z,
'rx': rx,
'ry': ry,
'rz': rz}
rx, ry, rz = tf.transformations.euler_from_quaternion(
[final_pose.orientation.x, final_pose.orientation.y, final_pose.orientation.z, final_pose.orientation.w])
pose_final_dct = {'x': final_pose.position.x,
'y': final_pose.position.y,
'z': final_pose.position.z,
'rx': rx,
'ry': ry,
'rz': rz}
x_step_var = (pose_final_dct['x'] - pose_initial_dct['x']) / n_steps
y_step_var = (pose_final_dct['y'] - pose_initial_dct['y']) / n_steps
z_step_var = (pose_final_dct['z'] - pose_initial_dct['z']) / n_steps
rx_step_var = (pose_final_dct['rx'] - pose_initial_dct['rx']) / n_steps
ry_step_var = (pose_final_dct['ry'] - pose_initial_dct['ry']) / n_steps
rz_step_var = (pose_final_dct['rz'] - pose_initial_dct['rz']) / n_steps
for i in range(n_steps):
dct = {'x': pose_initial_dct['x'] + (i + 1) * x_step_var,
'y': pose_initial_dct['y'] + (i + 1) * y_step_var,
'z': pose_initial_dct['z'] + (i + 1) * z_step_var,
'rx': pose_initial_dct['rx'] + (i + 1) * rx_step_var,
'ry': pose_initial_dct['ry'] + (i + 1) * ry_step_var,
'rz': pose_initial_dct['rz'] + (i + 1) * rz_step_var}
pose = data2pose(dct)
step_poses.append(pose)
return step_poses
def getClosestCollision(self, collisions, p1_xyz):
closest_collision_to_p1 = np.inf
for position_collision in collisions:
dist_collision_p1 = np.linalg.norm(position_collision - p1_xyz)
if dist_collision_p1 < closest_collision_to_p1:
closest_collision_to_p1 = dist_collision_p1
return closest_collision_to_p1
def saveFrame(self):
self.save_dataset.saveFrame()
def getFrameIdx(self):
return self.save_dataset.frame_idx
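# A minimal usage sketch (commented out; assumes a running Gazebo/ROS setup,
# an initialised rospy node, and a loaded model3d_config dict):
#
# collector = AutomaticDataCollection(model_name='localbot', seq='seq_test',
#                                     dbf=0.1, model3d_config=config,
#                                     save_dataset=True, mode='automatic')
# for pose in collector.generatePath():
#     collector.setPose(pose)
#     rospy.sleep(0.5)
#     collector.saveFrame()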
| 17,567 | 39.018223 | 125 | py |
synfeal | synfeal-main/synfeal_collection/src/interactive_data_collection.py |
import copy
import rospy
from std_msgs.msg import Header, ColorRGBA
from geometry_msgs.msg import Point, Pose, Vector3, Quaternion
from interactive_markers.interactive_marker_server import *
from interactive_markers.menu_handler import *
from visualization_msgs.msg import *
from gazebo_msgs.srv import SetModelState, GetModelState, SetModelStateRequest
from synfeal_collection.src.save_dataset import SaveDataset
class InteractiveDataCollection():
def __init__(self, model_name, seq):
self.set_state_service = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
self.menu_handler = MenuHandler()
self.model_name = model_name
self.server = InteractiveMarkerServer("interactive_camera")
rospy.wait_for_service('/gazebo/get_model_state')
self.get_model_state_service = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
pose_gazebo = self.get_model_state_service(self.model_name, 'world')
self.pose = copy.deepcopy(pose_gazebo.pose)
self.make6DofMarker(True, InteractiveMarkerControl.MOVE_3D, pose_gazebo.pose, True)
# add interactive marker to save datasets
self.original_pose = Pose(position=Point(x=0, y=0, z=1), orientation=Quaternion(x=0, y=0, z=0, w=1))
self.makeClickMarker(self.original_pose)
self.server.applyChanges()
# create instance to save dataset
self.save_dataset = SaveDataset(seq, mode='interactive')
def makeBox(self, msg, pose, color):
marker = Marker(header=Header(frame_id="world", stamp=rospy.Time.now()),
ns=self.model_name, id=0, frame_locked=False,
type=Marker.SPHERE, action=Marker.ADD, lifetime=rospy.Duration(0),
pose=pose,
scale=Vector3(x=0.05, y=0.05, z=0.05),
color=ColorRGBA(r=color[0], g=color[1], b=color[2], a=1))
return marker
def makeBoxControl(self, msg, pose, color):
control = InteractiveMarkerControl()
control.interaction_mode = InteractiveMarkerControl.BUTTON
control.always_visible = True
control.markers.append(self.makeBox(msg, pose, color))
msg.controls.append(control)
return control
def makeClickMarker(self,pose):
int_marker = InteractiveMarker()
int_marker.header.frame_id = "world"
int_marker.pose = pose
int_marker.scale = 1
int_marker.name = "Save Frame"
int_marker.description = "Click to save frame"
control = self.makeBoxControl(int_marker, pose, color= [0.2,0.8,0.2])
int_marker.controls.append(copy.deepcopy(control))
self.server.insert(int_marker, self.processFeedbackMenu)
self.menu_handler.apply(self.server, int_marker.name)
def make6DofMarker(self, fixed, interaction_mode, pose, show_6dof=False):
int_marker = InteractiveMarker()
int_marker.header.frame_id = "world"
int_marker.pose = pose
int_marker.scale = 0.3
int_marker.name = "simple_6dof"
int_marker.description = "Simple 6-DOF Control"
self.makeBoxControl(int_marker, pose, color= [0.8,0.2,0.2])
int_marker.controls[0].interaction_mode = interaction_mode
if fixed:
int_marker.name += "_fixed"
int_marker.description += "\n(fixed orientation)"
if interaction_mode != InteractiveMarkerControl.NONE:
control_modes_dict = {
InteractiveMarkerControl.MOVE_3D: "MOVE_3D",
InteractiveMarkerControl.ROTATE_3D: "ROTATE_3D",
InteractiveMarkerControl.MOVE_ROTATE_3D: "MOVE_ROTATE_3D"}
int_marker.name += "_" + control_modes_dict[interaction_mode]
int_marker.description = "3D Control"
if show_6dof:
int_marker.description += " + 6-DOF controls"
int_marker.description += "\n" + control_modes_dict[interaction_mode]
if show_6dof:
control = InteractiveMarkerControl()
control.orientation.w = 1
control.orientation.x = 1
control.orientation.y = 0
control.orientation.z = 0
control.name = "rotate_x"
control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS
if fixed:
control.orientation_mode = InteractiveMarkerControl.FIXED
int_marker.controls.append(control)
control = InteractiveMarkerControl()
control.orientation.w = 1
control.orientation.x = 1
control.orientation.y = 0
control.orientation.z = 0
control.name = "move_x"
control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS
if fixed:
control.orientation_mode = InteractiveMarkerControl.FIXED
int_marker.controls.append(control)
control = InteractiveMarkerControl()
control.orientation.w = 1
control.orientation.x = 0
control.orientation.y = 1
control.orientation.z = 0
control.name = "rotate_z"
control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS
if fixed:
control.orientation_mode = InteractiveMarkerControl.FIXED
int_marker.controls.append(control)
control = InteractiveMarkerControl()
control.orientation.w = 1
control.orientation.x = 0
control.orientation.y = 1
control.orientation.z = 0
control.name = "move_z"
control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS
if fixed:
control.orientation_mode = InteractiveMarkerControl.FIXED
int_marker.controls.append(control)
control = InteractiveMarkerControl()
control.orientation.w = 1
control.orientation.x = 0
control.orientation.y = 0
control.orientation.z = 1
control.name = "rotate_y"
control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS
if fixed:
control.orientation_mode = InteractiveMarkerControl.FIXED
int_marker.controls.append(control)
control = InteractiveMarkerControl()
control.orientation.w = 1
control.orientation.x = 0
control.orientation.y = 0
control.orientation.z = 1
control.name = "move_y"
control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS
if fixed:
control.orientation_mode = InteractiveMarkerControl.FIXED
int_marker.controls.append(control)
self.server.insert(int_marker, self.processFeedback)
self.menu_handler.apply(self.server, int_marker.name)
def processFeedback(self, feedback):
s = "feedback from marker '" + feedback.marker_name
s += "' / control '" + feedback.control_name + "'"
if feedback.event_type == InteractiveMarkerFeedback.POSE_UPDATE:
rospy.loginfo( s + ": pose changed")
print('feedback = \n' + str(feedback))
self.pose.position.x = feedback.pose.position.x
self.pose.position.y = feedback.pose.position.y
self.pose.position.z = feedback.pose.position.z
self.pose.orientation.x = feedback.pose.orientation.x
self.pose.orientation.y = feedback.pose.orientation.y
self.pose.orientation.z = feedback.pose.orientation.z
self.pose.orientation.w = feedback.pose.orientation.w
req = SetModelStateRequest() # Create an object of type SetModelStateRequest
req.model_state.model_name = self.model_name
req.model_state.pose.position.x = self.pose.position.x
req.model_state.pose.position.y = self.pose.position.y
req.model_state.pose.position.z = self.pose.position.z
req.model_state.pose.orientation.x = self.pose.orientation.x
req.model_state.pose.orientation.y = self.pose.orientation.y
req.model_state.pose.orientation.z = self.pose.orientation.z
req.model_state.pose.orientation.w = self.pose.orientation.w
req.model_state.reference_frame = 'world'
self.set_state_service(req.model_state)
self.server.applyChanges()
def processFeedbackMenu(self, feedback):
s = "feedback from marker '" + feedback.marker_name
s += "' / control '" + feedback.control_name + "'"
if feedback.event_type == InteractiveMarkerFeedback.BUTTON_CLICK:
self.save_dataset.saveFrame()
def callbackTimer(self,event):
print('Timer called at ' + str(event.current_real))
def getFrameIdx(self):
return self.save_dataset.frame_idx
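# A minimal usage sketch (commented out; assumes a running Gazebo/ROS setup):
#
# rospy.init_node('interactive_data_collection')
# collector = InteractiveDataCollection(model_name='localbot', seq='seq_test')
# rospy.spin()   # drag the 6-DOF marker to move the camera, click the green
#                # sphere marker to save a frame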
| 9,016 | 42.350962 | 108 | py |