# File: robogym-master/robogym/worldgen/parser/normalize.py
import ast
import re
from collections import OrderedDict
from decimal import Decimal, getcontext
from typing import List, Union
import numpy as np
from robogym.worldgen.parser.const import float_arg_types, list_types
getcontext().prec = 10
"""
These methods are used internally by parser.py.
Internal notes:
normalize() - in-place normalizes and converts an xml dictionary
see docstring for notes about what types are converted
stringify() - in-place de-normalizes, and converts all values to strings
see docstring for notes
normalize_*() - return normal forms of values (numbers, vectors, etc)
raise exception if input value cannot be converted
"""
def normalize(xml_dict: OrderedDict):
"""
The starting point is a dictionary of the form returned by xmltodict.
See that module's documentation here:
https://github.com/martinblech/xmltodict
    Normalize a MuJoCo model XML:
    - some nodes have OrderedDict values (mostly top-level ones such as worldbody),
      and some nodes have list values (mostly lower-level ones). Check const.py
      for more information.
- parameters ('@name', etc) never have list or OrderedDict values
- "true" and "false" are converted to bool()
- numbers are converted to floats
- vectors are converted to np.ndarray()
Note: stringify() is the opposite of this, and converts everything back
into strings in preparation for unparse_dict().
"""
# As a legacy, previously many of our XMLs had an unused model name.
# This removes it (as part of annotate) and can be phased out eventually.
if "@model" in xml_dict:
del xml_dict["@model"]
for key, value in xml_dict.items():
if isinstance(value, OrderedDict):
# There is one exception.
# <default> symbol occurs twice.
            # Once as OrderedDict (top-level), once as list (lower-level).
if key == "default":
if "@class" in value:
xml_dict[key] = [value]
elif key in list_types:
xml_dict[key] = [value]
normalize(value)
continue
if isinstance(value, list):
for child in value:
normalize(child)
continue
if isinstance(value, str):
xml_dict[key] = normalize_value(value)
# sometimes data is stored as int when it's float.
# We make a conversion here.
if key in float_arg_types:
if isinstance(xml_dict[key], int):
xml_dict[key] = float(xml_dict[key])
elif isinstance(xml_dict[key], np.ndarray):
xml_dict[key] = xml_dict[key].astype(np.float64)
def num2str(num: Decimal) -> str:
ret = "%g" % Decimal("%.6f" % num)
if ret == "-0":
return "0"
else:
return ret
def vec2str(vec: List) -> str:
return " ".join([num2str(v) for v in vec])
def is_normalizeable(normalize_function, value) -> bool:
"""
Wraps a normalize_*() function, and returns True if value can be
normalized by normalize_function, otherwise returns False.
"""
try:
normalize_function(value)
return True
except Exception:
return False
def normalize_numeric(value):
""" Normalize a numeric value into a float. """
if isinstance(value, (float, int, np.float64, np.int64)):
return value
if isinstance(value, (str, bytes)):
f = float(value)
if f == int(f): # preferentially return integers if equal
return int(f)
return f
raise ValueError("Cannot convert {} to numeric".format(value))
def normalize_vector(value: Union[List, str, np.ndarray]) -> np.ndarray:
""" Normalize a vector value to a np.ndarray(). """
if isinstance(value, np.ndarray):
return value
if (
isinstance(value, (list, tuple))
and len(value) > 0
and is_normalizeable(normalize_numeric, value[0])
):
return np.array(value)
if isinstance(value, str):
# Split on spaces, filter empty, convert to numpy array
if "," in value or re.search("\\[.*\\]", value) is not None:
return np.array(ast.literal_eval(value))
else:
split = value.split()
return np.array([normalize_numeric(v) for v in split])
raise ValueError("Cannot convert {} to vector".format(value))
def normalize_boolean(value):
""" Normalize a boolean value to a bool(). """
if isinstance(value, bool):
return value
if isinstance(value, str):
if value.lower().strip() == "true":
return True
if value.lower().strip() == "false":
return False
raise ValueError("Cannot convert {} to boolean".format(value))
def normalize_none(value):
""" Normalize a none string value to a None. """
    if value is None:
        return value
if isinstance(value, str):
if value.lower().strip() == "none":
return None
raise ValueError("Cannot convert {} to None".format(value))
def normalize_string(value):
""" Normalize a string value. """
if isinstance(value, bytes):
value = value.decode()
if isinstance(value, str):
return value.strip()
raise ValueError("Cannot convert {} to string".format(value))
def normalize_value(value):
""" Return the normalized version of a value by trying normalize_*(). """
if value is None:
return None
for normalizer in (
normalize_numeric,
normalize_vector,
normalize_none,
normalize_boolean,
normalize_string,
):
try:
return normalizer(value)
except Exception:
continue
raise ValueError("Cannot normalize {}: {}".format(type(value), value))
def stringify(xml_dict: Union[OrderedDict, list]):
"""
De-normalize xml dictionary (or list), converting all pythonic values (arrays, bools)
into strings that will be used in the final XML.
This is the opposite of normalize().
"""
if isinstance(xml_dict, OrderedDict):
enumeration = list(xml_dict.items())
elif isinstance(xml_dict, list):
enumeration = enumerate(xml_dict)
for key, value in enumeration:
# Handle a list of nodes to stringify
if isinstance(value, list):
if len(value) == 0:
del xml_dict[key]
else:
if sum(
                    [isinstance(v, (int, float, np.float32, np.integer)) for v in value]
) == len(value):
xml_dict[key] = vec2str(value)
else:
stringify(value)
elif isinstance(value, OrderedDict):
stringify(value)
elif isinstance(value, (np.ndarray, tuple)):
xml_dict[key] = vec2str(value)
elif isinstance(value, float):
xml_dict[key] = num2str(value) # format with fixed decimal places
elif isinstance(value, bool): # MUST COME BEFORE int() CHECK
xml_dict[key] = str(value).lower() # True -> 'true', etc.
elif isinstance(value, int): # isinstance(True, int) -> True. SAD!
xml_dict[key] = str(value) # Format without decimal places
elif isinstance(value, str):
pass # Value is already fine
elif value is None:
pass
else:
raise ValueError("Bad type for key {}: {}".format(key, type(value)))

# File: robogym-master/robogym/wrappers/parametric.py
import gym
class EnvParameterWrapper(gym.Wrapper):
""" Generic parameter that modifies environment parameters on each reset """
def __init__(self, env, parameter_name: str):
super().__init__(env)
self.parameter_name = parameter_name
self.original_value = getattr(self.unwrapped.parameters, self.parameter_name)
def step(self, action):
return self.env.step(action)
def new_value(self):
raise NotImplementedError
def reset(self, **kwargs):
setattr(self.unwrapped.parameters, self.parameter_name, self.new_value())
return self.env.reset(**kwargs)
class RandomizedPerpendicularCubeSizeWrapper(EnvParameterWrapper):
""" Randomize size of the "perpendicular" cube """
def __init__(self, env=None, cube_size_range=None):
super().__init__(env, "cube_size_multiplier")
if cube_size_range is None:
cube_size_range = [0.95, 1.05]
self._cube_size_range = cube_size_range
def new_value(self):
return self.unwrapped._random_state.uniform(
self._cube_size_range[0], self._cube_size_range[1]
)

# File: robogym-master/robogym/wrappers/cube.py
from collections import OrderedDict
import gym
import numpy as np
from gym.spaces import Box, Dict
from robogym.wrappers import randomizations
from robogym.wrappers.randomizations import loguniform
from robogym.wrappers.util import update_obs_space
class RandomizedCubeSizeWrapper(randomizations.RandomizedBodyWrapper):
def __init__(self, env=None, cube_size_range=[0.95, 1.05]):
super().__init__(env)
self._cube_size_range = cube_size_range
def _get_observation_space_delta(self, sim):
return OrderedDict([("cube_size", (1,))])
def _get_field(self, sim):
cube_idx = sim.model.geom_name2id("cube:middle")
cube_size = sim.model.geom_size[
cube_idx
] # the other unnamed geom is target cube
ret = {"cube_size": cube_size}
if "cube:top" in sim.model.body_names:
for name in ["cube:top", "cube:bottom"]:
idx = sim.model.body_name2id(name)
ret[name] = sim.model.body_pos[idx]
return ret
def _set_field(self, sim):
cube_geom_idxs = [sim.model.geom_name2id("cube:middle")]
if "cube:top" in sim.model.geom_names:
cube_geom_idxs += [
sim.model.geom_name2id(name) for name in ["cube:top", "cube:bottom"]
]
random_state = self.unwrapped._random_state
scale = random_state.uniform(
self._cube_size_range[0], self._cube_size_range[1], size=[1]
)
val = self._orig_value["cube_size"] * scale
for cube_geom_idx in cube_geom_idxs:
sim.model.geom_size[cube_geom_idx] = val
# For face cube, we have to move bodies for rescaling to work.
if "cube:top" in sim.model.body_names:
for name in ["cube:top", "cube:bottom"]:
idx = sim.model.body_name2id(name)
sim.model.body_pos[idx] = self._orig_value[name] * scale
return OrderedDict([("cube_size", val.copy())])
class RandomizedWindWrapper(gym.Wrapper):
def __init__(self, env=None, force_std=1.0, max_mean_time_between=0.8):
super().__init__(env)
self._force_std = force_std
self._max_mean_time_between = max_mean_time_between
def reset(self, *args, **kwargs):
obs = self.env.reset(*args, **kwargs)
sim = self.unwrapped.sim
self._hit_prob = loguniform(
random_state=self.unwrapped._random_state,
low=0.01
* sim.nsubsteps
* sim.model.opt.timestep
/ self._max_mean_time_between,
high=sim.nsubsteps * sim.model.opt.timestep / self._max_mean_time_between,
)
return obs
def step(self, action):
ret = self.env.step(action)
sim = self.unwrapped.sim
i = sim.model.body_name2id("cube:middle")
cube_mass = sim.model.body_mass[i]
sim.data.xfrc_applied[i, :3] *= 0.99 # TODO: make substeps dependent
if self.unwrapped._random_state.random_sample() < self._hit_prob:
sim.data.xfrc_applied[i, :3] = (
self.unwrapped._random_state.randn(3) * cube_mass * self._force_std
)
return ret
class CubeFreezingPhasespaceBody(randomizations.FreezingPhasespaceBody):
def __init__(self, env=None, disappear_p_1s=0.02, freeze_scale_s=1.0):
super().__init__(
env,
keys=[
"noisy_relative_goal_pos",
"noisy_relative_goal_quat",
"noisy_relative_goal_face_angle",
"noisy_achieved_goal_pos",
"noisy_achieved_goal_quat",
"noisy_achieved_goal_face_angle",
"noisy_cube_pos",
],
disappear_p_1s=disappear_p_1s,
freeze_scale_s=freeze_scale_s,
)
class StopOnFallWrapper(gym.Wrapper):
def __init__(self, env=None, drop_reward=-20.0, min_episode_length=-1):
super().__init__(env)
self.observation_space = update_obs_space(env, {"fell_down": (1,)})
self.steps = 0
self.drop_reward = drop_reward
self.env.unwrapped.reward_names.append("drop")
self.min_episode_length = min_episode_length
self.drops_so_far = 0
self.first_drop = 0
def reset(self, *args, **kwargs):
self.drops_so_far = 0
self.first_drop = 0
self.steps = 0
return self.observation(self.env.reset(*args, **kwargs))
def step(self, action):
obs, rew, done, info = self.env.step(action)
# Handle dropping.
current_drop_reward = 0.0
if self._is_fallen():
done = True
self.drops_so_far += 1
if not self.first_drop:
# Penalize only first frame where cube dropped.
current_drop_reward = self.drop_reward
self.first_drop = info["successes_so_far"] + 1
if self.steps < self.min_episode_length:
# If we require a minimum episode length, do not return a terminal state until
# we have reached the minimum.
done = False
rew = rew + [current_drop_reward]
info["fell_down"] = self._is_fallen()
info["drops_so_far"] = self.drops_so_far
info["first_drop"] = self.first_drop
self.steps += 1
return self.observation(obs), rew, done, info
def observation(self, observation):
observation["fell_down"] = np.array([self._is_fallen()])
return observation
def _is_fallen(self):
cube_middle_idx = self.unwrapped.sim.model.site_name2id("cube:center")
cube_middle_pos = self.unwrapped.sim.data.site_xpos[cube_middle_idx]
return cube_middle_pos[2] < 0.04
class AngleObservationWrapper(gym.ObservationWrapper):
def __init__(self, env=None):
"""Change angles to sines and cosines"""
super().__init__(env)
new_spaces = {}
for name, value in self.env.observation_space.spaces.items():
if name.endswith("_angle"):
new_spaces[name] = Box(
-np.inf, np.inf, [value.shape[0] * 2], value.dtype
)
else:
new_spaces[name] = value
self.observation_space = Dict(new_spaces)
def observation(self, observation):
extended_observation = OrderedDict()
for key in observation:
if key.endswith("_angle"):
extended_observation[key] = np.concatenate(
[np.cos(observation[key]), np.sin(observation[key])]
)
else:
extended_observation[key] = observation[key]
return extended_observation

# File: robogym-master/robogym/wrappers/randomizations.py
import copy
import math
from collections import OrderedDict, deque
import gym
import numpy as np
from gym.spaces import Box, Dict
from robogym.utils.dactyl_utils import actuated_joint_range
from robogym.utils.rotation import (
normalize_angles,
quat_average,
quat_from_angle_and_axis,
quat_mul,
quat_normalize,
)
from robogym.wrappers.util import update_obs_space
def loguniform(random_state, low, high, size=[]):
return np.exp(random_state.uniform(np.log(low), np.log(high), size=size))
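# e.g. loguniform(rs, 0.5, 2.0) draws multipliers whose logarithm is uniform on
# [log 0.5, log 2.0], so halving and doubling are equally likely.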
class RandomizedBodyWrapper(gym.ObservationWrapper):
def __init__(self, env=None):
"""
Randomize properties of some bodies.
"""
super().__init__(env)
self._orig_value = None
assert hasattr(self.unwrapped, "sim")
delta = self._get_observation_space_delta(self.unwrapped.sim)
self.observation_space = update_obs_space(self.env, delta)
def reset(self, *args, **kwargs):
sim = self.unwrapped.sim
if self._orig_value is None:
self._orig_value = copy.deepcopy(self._get_field(sim))
self._obs_delta = self._set_field(sim)
# We want to reset only after updating the parameters, the reason is that reset will run
# the simulation a bit until the cube is on the palm, so, if we reset before, it will run
# with parameters from the previous episode which is problematic for ADR
obs = self.env.reset(*args, **kwargs)
return self.observation(obs)
def observation(self, obs):
new_obs = OrderedDict()
for key, value in obs.items():
new_obs[key] = value
for key, value in self._obs_delta.items():
new_obs[key] = copy.deepcopy(value)
if isinstance(new_obs[key], np.ndarray):
new_obs[key] = new_obs[key].ravel()
return new_obs
def _get_observation_space_delta(self, sim):
return {}
def _get_field(self, sim):
"""Called once per environments existance to establish initial value
that simulator assigns to some quantity, e.g. gravity"""
return None
def _set_field(self, sim):
"""Called every time episode is reset to update the new value for quantity
code can reference self._orig_value to get original value returned by
self._get_field."""
raise NotImplementedError()
class RandomizedBodyInertiaWrapper(RandomizedBodyWrapper):
def __init__(self, env=None, mass_range=[0.5, 1.5]):
super().__init__(env)
self._mass_range = mass_range
def _get_observation_space_delta(self, sim):
return OrderedDict([("body_inertia", sim.model.body_inertia.shape)])
def _get_field(self, sim):
return sim.model.body_inertia
def _set_field(self, sim):
val = sim.model.body_inertia[:] = (
self._orig_value
* self.unwrapped._random_state.uniform(
low=self._mass_range[0],
high=self._mass_range[1],
size=(sim.model.body_inertia.shape[0], 1),
)
)
return OrderedDict([("body_inertia", val.copy())])
class RandomizedFrictionBaseWrapper(RandomizedBodyWrapper):
def __init__(self, env, multiplier_ranges, geom_name_prefix=None):
"""
Contact friction parameters for dynamically generated contact pairs.
1. the sliding friction, acting along both axes of the tangent plane.
2. the torsional friction, acting around the contact normal.
3. the rolling friction, acting around both axes of the tangent plane.
        We expect multiplier_ranges to be a numpy array of shape (3, 2).
        multiplier_ranges[0], ...[1], ...[2] correspond to the multiplier ranges
        applied to the three types of contact friction, respectively.
"""
super().__init__(env)
self._multiplier_ranges = np.array(multiplier_ranges).copy()
assert self._multiplier_ranges.shape == (3, 2)
if geom_name_prefix is None:
self._geom_names = list(self.env.unwrapped.sim.model.geom_names).copy()
else:
self._geom_names = [
name
for name in self.env.unwrapped.sim.model.geom_names
if name.startswith(geom_name_prefix)
]
self._geom_ids = [
self.unwrapped.sim.model.geom_name2id(name) for name in self._geom_names
]
self._geom_ids = np.array(self._geom_ids)
# Used by ADR
self._multiplier_values = None
def _get_observation_space_delta(self, sim):
return OrderedDict([("friction", sim.model.geom_friction.shape)])
def _get_field(self, sim):
return sim.model.geom_friction
def _set_field(self, sim):
if self._multiplier_values is not None:
assert len(self._multiplier_values) == self._orig_value.shape[-1]
for col, multiplier in enumerate(self._multiplier_values):
val = self._orig_value[self._geom_ids, col] * multiplier
sim.model.geom_friction[self._geom_ids, col] = val
else:
assert len(self._multiplier_ranges) == self._orig_value.shape[-1]
for col, multi_range in enumerate(self._multiplier_ranges):
# use a single multiplier for each type of friction. Avoids "averaging" out
# friction.
multiplier = self.unwrapped._random_state.uniform(
multi_range[0], multi_range[1]
)
val = self._orig_value[self._geom_ids, col] * multiplier
sim.model.geom_friction[self._geom_ids, col] = val
return OrderedDict([("friction", sim.model.geom_friction.copy())])
def update_parameters(self, slide_multiplier, spin_multiplier, roll_multiplier):
self._multiplier_values = [slide_multiplier, spin_multiplier, roll_multiplier]
class RandomizedFrictionWrapper(RandomizedFrictionBaseWrapper):
def __init__(self, env=None, multiplier_range=[0.7, 1.3]):
multiplier_ranges = [multiplier_range] * 3
super().__init__(env, multiplier_ranges, "robot0:")
class RandomizedRobotFrictionWrapper(RandomizedFrictionBaseWrapper):
def __init__(
self, env=None, multiplier_ranges=[[0.7, 1.3], [0.5, 1.5], [0.5, 1.5]]
):
super().__init__(env, multiplier_ranges, "robot0:")
class RandomizedCubeFrictionWrapper(RandomizedFrictionBaseWrapper):
def __init__(
self, env=None, multiplier_ranges=[[0.5, 1.5], [0.2, 5.0], [0.2, 5.0]]
):
super().__init__(env, multiplier_ranges, "cube:")
class RandomizedGravityWrapper(RandomizedBodyWrapper):
def __init__(self, env=None, gravity_std=0.4):
super().__init__(env)
self._gravity_std = gravity_std
def _get_observation_space_delta(self, sim):
return OrderedDict([("gravity", sim.model.opt.gravity.shape)])
def _get_field(self, sim):
return sim.model.opt.gravity
def _set_field(self, sim):
val = sim.model.opt.gravity[
:
] = self._orig_value + self._gravity_std * self.unwrapped._random_state.randn(3)
return OrderedDict([("gravity", val.copy())])
class RandomizedTimestepWrapper(RandomizedBodyWrapper):
def __init__(
self,
env=None,
min_lambda=125 * 10,
max_lambda=1000 * 10,
adr_bias_magic=0.6,
adr_variance_magic=1.0,
):
"""Randomize the environment timestep by a value from an exponential distribution
with the parameter lambda sampled once per episode from [min_lambda,max_lambda]."""
super().__init__(env)
self._min_lambda = min_lambda
self._max_lambda = max_lambda
self._adr_bias_magic = adr_bias_magic
self._adr_variance_magic = adr_variance_magic
self._adr_bias = 0.0
self._adr_variance = 0.0
self._side = 1.0 # positive or negative
self._p_flip_pos = 0.5
self._p_flip_neg = 0.5
self._positive_lambda = 0.0
self._negative_lambda = 0.0
self._bias_multiplier = 0.0
self._variance_multiplier = 0.0
def _get_observation_space_delta(self, sim):
return OrderedDict([("timestep_lambda", (2,)), ("timestep_multipliers", (2,))])
def _get_field(self, sim):
return sim.model.opt.timestep
def update_adr_bias(self, adr_bias):
self._adr_bias = adr_bias
def update_adr_variance(self, adr_variance):
self._adr_variance = adr_variance
def _set_field(self, *args, **kwargs):
self._bias_multiplier = np.exp(self._adr_bias * self._adr_bias_magic)
self._variance_multiplier = np.exp(
self._adr_variance * self._adr_variance_magic
)
self._positive_lambda = self.unwrapped._random_state.uniform(
self._min_lambda, self._max_lambda
)
self._negative_lambda = self.unwrapped._random_state.uniform(
self._min_lambda, self._max_lambda
)
self._side = self.unwrapped._random_state.choice([-1.0, 1.0])
self._p_flip_pos = self.unwrapped._random_state.uniform()
self._p_flip_neg = self.unwrapped._random_state.uniform()
return OrderedDict(
[
("timestep_lambda", [self._positive_lambda, self._negative_lambda]),
(
"timestep_multipliers",
[self._bias_multiplier, self._variance_multiplier],
),
]
)
def step(self, action):
obs, rew, done, info = self.env.step(action)
# Simulate flipping somehow
if self._side > 0:
if self.unwrapped._random_state.uniform() > self._p_flip_pos:
self._side = -self._side
else:
if self.unwrapped._random_state.uniform() > self._p_flip_neg:
self._side = -self._side
if self._side > 0:
noise = self.unwrapped._random_state.exponential(
1.0 / self._positive_lambda
)
else:
noise = self.unwrapped._random_state.exponential(
1.0 / self._negative_lambda
)
noise *= self._variance_multiplier
if self._side < 0:
# Rescale
fraction = noise / self._orig_value
noise = self._orig_value * (fraction / (1 + fraction))
if self._side < 0:
# Clip the noise if it's negative so that the simulation is stable
noise = np.clip(noise, 0.0, self._orig_value / 2)
self.unwrapped.sim.model.opt.timestep = self._bias_multiplier * (
self._orig_value + self._side * noise
)
return self.observation(obs), rew, done, info
# empirical constant to allow quaternion noise to be specified at same level as
# Euler angle additive perturbation measured in radians
QUAT_NOISE_CORRECTION = 1.96
class RandomizeObservationWrapper(gym.ObservationWrapper):
def __init__(self, env=None, levels=None):
super().__init__(env)
self._correlated_multiplier = 1.0
        self._uncorrelated_multiplier = 1.0
self._levels = levels
self._additive_bias = {}
self._multiplicative_bias = {}
new_spaces = self.env.observation_space.spaces.copy()
new_spaces.update(
{f"noisy_{k}": self.env.observation_space.spaces[k] for k in self._levels}
)
self.observation_space = Dict(new_spaces)
self.random_state = self.unwrapped._random_state
def key_length(self, key):
if not key.endswith("_quat"):
return self.env.observation_space.spaces[key].shape[0]
else:
assert self.env.observation_space.spaces[key].shape[0] == 4
return 1
def reset(self, *args, **kwargs):
observation = self.env.reset(*args, **kwargs)
for key in sorted(self._levels):
key_len = self.key_length(key)
self._additive_bias[key] = (
self.random_state.randn(key_len)
* self._levels[key].get("additive", 0.0)
* self._correlated_multiplier
)
self._multiplicative_bias[key] = (
1.0
+ self.random_state.randn(key_len)
* self._levels[key].get("multiplicative", 0.0)
* self._correlated_multiplier
)
return self.observation(observation)
def observation(self, observation):
randomized_observation = OrderedDict()
for key in observation:
randomized_observation[key] = observation[key]
for key in sorted(self._levels):
key_len = self.key_length(key)
uncorrelated_bias = (
self.random_state.randn(key_len)
* self._levels[key].get("uncorrelated", 0.0)
                * self._uncorrelated_multiplier
)
additive_bias = self._additive_bias[key] + uncorrelated_bias
if f"noisy_{key}" in observation:
# There is already noisy value available for this observation key,
# we apply noise on top of the noisy value.
obs_key = f"noisy_{key}"
else:
# Apply noise on top of noiseless observation if no noisy value available.
obs_key = key
new_value = observation[obs_key].copy()
if not key.endswith("_quat"):
new_value *= self._multiplicative_bias[key]
new_value += additive_bias
else:
assert np.allclose(self._multiplicative_bias[key], 1.0)
noise_axis = self.random_state.uniform(-1.0, 1.0, size=(3,))
additive_bias *= QUAT_NOISE_CORRECTION
noise_quat = quat_from_angle_and_axis(additive_bias, noise_axis)
new_value = quat_normalize(quat_mul(new_value, noise_quat))
randomized_observation[f"noisy_{key}"] = new_value
return randomized_observation
def update_parameters(self, correlated_multiplier, uncorrelated_multiplier):
self._correlated_multiplier = correlated_multiplier
        self._uncorrelated_multiplier = uncorrelated_multiplier
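# Example `levels` configuration (hypothetical values): each entry may specify
# "additive", "multiplicative", and "uncorrelated" noise scales; quaternion keys
# (ending in "_quat") only support additive noise, applied as a random rotation.
#
#   levels = {
#       "cube_pos": {"additive": 0.005, "uncorrelated": 0.001},
#       "cube_quat": {"additive": 0.05},
#   }
#   env = RandomizeObservationWrapper(env, levels=levels)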
class FreezingPhasespaceMarkers(gym.ObservationWrapper):
def __init__(self, env=None, key=None, disappear_p_1s=None, freeze_scale_s=None):
"""Make phasespace markers disappear sometimes (which is simulated by returning old values)
Parameters
----------
key: str
            Name of key in obs that represents the markers;
            must be a (3 * n_markers) array.
        disappear_p_1s: float
            Probability that one of the markers will disappear during a period of 1 second.
        freeze_scale_s: float
            Time scale (in seconds) for how long a marker stays disappeared.
"""
super().__init__(env)
n_substeps = self.unwrapped.sim.nsubsteps
substep_duration_s = self.unwrapped.sim.model.opt.timestep
step_duration_s = n_substeps * substep_duration_s
self._key = key
self._disappear_p = 1.0 - (1.0 - disappear_p_1s) ** step_duration_s
self._freeze_scale_steps = freeze_scale_s / step_duration_s
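        # e.g. with disappear_p_1s=0.02 and a 0.08 s control step,
        # _disappear_p = 1 - 0.98 ** 0.08 ~= 0.0016 per step, which compounds
        # back to ~2% over one second of steps.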
def reset(self, *args, **kwargs):
obs = self.env.reset(*args, **kwargs)
assert len(obs[self._key]) % 3 == 0
self._n_markers = len(obs[self._key]) // 3
self._freeze_left = np.array([0 for _ in range(self._n_markers)])
self._obs_buffer = obs[self._key].copy()
return obs
def observation(self, observation):
new_observation = OrderedDict()
for key in observation:
new_observation[key] = observation[key]
# update nonfrozen observations for all markers.
for i, left in enumerate(self._freeze_left[: self._n_markers]):
if left <= 0:
self._obs_buffer[3 * i: 3 * (i + 1)] = observation[self._key][
3 * i: 3 * (i + 1)
]
new_observation[self._key] = self._obs_buffer.copy()
# update freeze_left
self._freeze_left = np.maximum(self._freeze_left - 1, 0)
does_freeze = (
self.unwrapped._random_state.random_sample(size=self._n_markers)
< self._disappear_p
)
new_freeze_len = np.round(
self.unwrapped._random_state.exponential(
scale=self._freeze_scale_steps, size=self._n_markers
)
)
self._freeze_left = (
1 - does_freeze
) * self._freeze_left + does_freeze * new_freeze_len
return new_observation
class FreezingPhasespaceBody(gym.ObservationWrapper):
def __init__(self, env=None, keys=None, disappear_p_1s=None, freeze_scale_s=None):
"""Make some keys disappear sometimes (which is simulated by returning old values)
Parameters
----------
        keys: list of str
            Names of keys to be frozen.
        disappear_p_1s: float
            Probability that one of the keys will disappear during a period of 1 second.
        freeze_scale_s: float
            Time scale (in seconds) for how long a key stays frozen.
"""
super().__init__(env)
keys = [k for k in keys if k in env.observation_space.spaces]
n_substeps = self.unwrapped.sim.nsubsteps
substep_duration_s = self.unwrapped.sim.model.opt.timestep
step_duration_s = n_substeps * substep_duration_s
self._keys = keys
self._disappear_p = 1.0 - (1.0 - disappear_p_1s) ** step_duration_s
self._freeze_scale_steps = freeze_scale_s / step_duration_s
def reset(self, *args, **kwargs):
obs = self.env.reset(*args, **kwargs)
self._freeze_left = 0
self._obs_buffer = {k: obs[k].copy() for k in self._keys}
return obs
def observation(self, observation):
# update nonfrozen observations for all markers.
if self._freeze_left <= 0:
for k in self._keys:
self._obs_buffer[k] = observation[k].copy()
# update freeze_left
self._freeze_left = max(self._freeze_left - 1, 0)
does_freeze = self.unwrapped._random_state.random_sample() < self._disappear_p
new_freeze_len = np.round(
self.unwrapped._random_state.exponential(scale=self._freeze_scale_steps)
)
self._freeze_left = (
1 - does_freeze
) * self._freeze_left + does_freeze * new_freeze_len
new_observation = OrderedDict()
for key in observation:
new_observation[key] = (
self._obs_buffer[key].copy() if key in self._keys else observation[key]
)
return new_observation
class RandomizedActionLatency(RandomizedBodyWrapper):
def __init__(self, env, max_delay=1):
"""For random coordinates of action space return old values"""
super().__init__(env)
self._max_delay = max_delay
assert (
isinstance(self.env.action_space, Box)
and len(self.env.action_space.shape) == 1
)
self._action_size = self.env.action_space.shape[0]
delta = OrderedDict(
[
("performed_action", self.env.action_space.shape),
("action_history", (self._max_delay + 1, self._action_size)),
("action_delay", self.env.action_space.shape),
]
)
self.observation_space = update_obs_space(self.env, delta)
def reset(self, *args, **kwargs):
obs = self.env.reset(*args, **kwargs)
self._action_history = np.zeros((self._max_delay + 1, self._action_size))
self._action_delay = self.unwrapped._random_state.randint(
low=0, high=self._max_delay + 1, size=self._action_size
)
obs["action_history"] = self._action_history[:-1]
obs["action_delay"] = self._action_delay
return obs
def step(self, action):
self._action_history[0], self._action_history[1:] = (
action,
self._action_history[:-1],
)
new_action = self._action_history[
self._action_delay, list(range(self._action_size))
]
obs, rew, done, info = self.env.step(new_action.copy())
obs["action_history"] = self._action_history[:-1]
obs["action_delay"] = self._action_delay
return obs, rew, done, info
def update_parameters(self, max_delay):
self._max_delay = max_delay
class RandomizedDampingWrapper(RandomizedBodyWrapper):
def __init__(self, env=None, damping_range=[0.3, 3.0], joint_names=[]):
self._damping_range = damping_range
self._joint_names = joint_names
super().__init__(env)
def _get_observation_space_delta(self, sim):
return OrderedDict([("joint_damping", (len(self._joint_names),))])
def _get_field(self, sim):
joint_ids = [sim.model.joint_name2id(name) for name in self._joint_names]
dof_ids = [
idx for idx in range(sim.model.nv) if sim.model.dof_jntid[idx] in joint_ids
]
return [sim.model.dof_damping[idx] for idx in dof_ids]
def _set_field(self, sim):
joint_ids = [sim.model.joint_name2id(name) for name in self._joint_names]
dof_ids = [
idx for idx in range(sim.model.nv) if sim.model.dof_jntid[idx] in joint_ids
]
val = self._orig_value * loguniform(
self.unwrapped._random_state,
self._damping_range[0],
self._damping_range[1],
size=[len(dof_ids)],
)
sim.model.dof_damping[dof_ids] = val
return OrderedDict([("joint_damping", val.copy())])
class RandomizedJointLimitWrapper(RandomizedBodyWrapper):
def __init__(self, env=None, joint_names=[], relative_std=0.15):
"""Randomize the joint limit and update joint range and actuator ctrl range accordingly.
"""
self._joint_names = joint_names or env.unwrapped.sim.model.joint_names
self._relative_std = relative_std
assert set(self._joint_names).issubset(set(env.unwrapped.sim.model.joint_names))
self._joint_ids = [
env.unwrapped.sim.model.joint_name2id(name) for name in self._joint_names
]
super().__init__(env)
def _get_observation_space_delta(self, sim):
return OrderedDict([("joint_limit", (len(self._joint_names),))])
def _get_field(self, sim):
jnt_limits = actuated_joint_range(sim)[self._joint_ids]
assert all(jnt_limits[:, 1] - jnt_limits[:, 0] >= 0.0)
return jnt_limits
def _set_field(self, sim):
limit_widths = self._orig_value[:, 1] - self._orig_value[:, 0]
stds = limit_widths * self._relative_std
stds_reshaped = np.repeat(stds, 2).reshape(
len(self._joint_names), 2
) * self._random_noises(len(self._joint_names))
new_jnt_limits = self._orig_value.copy()
for idx, jnt_id in enumerate(self._joint_ids):
min_width = limit_widths[idx] * 0.001
# Let's go through the joint limit range one by one to handle special case,
# i.e., if the lower bound is 0.0, we should not lower it to be negative
low, high = new_jnt_limits[idx]
if low == 0.0 and high > 0:
low = max(0.0, low + stds_reshaped[idx][0])
high = max(low + min_width, high + stds_reshaped[idx][1])
elif low < 0 and high == 0.0:
high = min(0.0, high + stds_reshaped[idx][1])
low = min(high - min_width, low + stds_reshaped[idx][0])
else:
low += stds_reshaped[idx][0]
high = max(low + min_width, high + stds_reshaped[idx][1])
new_jnt_limits[idx][0] = low
new_jnt_limits[idx][1] = high
# Apply the new joint limit to the joint range and actuator control range.
sim.model.jnt_range[self._joint_ids] = new_jnt_limits.copy()
for jnt_id, jnt_name in zip(self._joint_ids, self._joint_names):
actuator_name = jnt_name.replace(":", ":A_")
if actuator_name not in sim.model.actuator_names:
continue
actuator_id = sim.model.actuator_name2id(actuator_name)
if actuator_name[-3:] == "FJ1":
# This actuator should control the unactuated "*FJ0' joint as well.
other_jnt_name = jnt_name.replace("FJ1", "FJ0")
other_jnt_id = sim.model.joint_name2id(other_jnt_name)
fj0_range = sim.model.jnt_range[other_jnt_id]
fj1_range = sim.model.jnt_range[jnt_id]
sim.model.actuator_ctrlrange[actuator_id] = np.array(
[min(fj0_range[0], fj1_range[0]), fj0_range[1] + fj1_range[1]]
)
else:
sim.model.actuator_ctrlrange[actuator_id] = sim.model.jnt_range[jnt_id]
return OrderedDict([("joint_limit", new_jnt_limits)])
def _random_noises(self, n_jnt):
return self.unwrapped._random_state.randn(n_jnt, 2)
def update_parameters(self, relative_std):
self._relative_std = relative_std
class RandomizedTendonRangeWrapper(RandomizedBodyWrapper):
"""Randomize and update all tendon ranges."""
def __init__(self, env=None, relative_std=0.15):
self._relative_std = relative_std
super().__init__(env)
def _get_observation_space_delta(self, sim):
return OrderedDict([("tendon_range", (len(sim.model.tendon_names),))])
def _get_field(self, sim):
assert all(sim.model.tendon_range[:, 1] - sim.model.tendon_range[:, 0] >= 0.0)
return sim.model.tendon_range
def _set_field(self, sim):
widths = self._orig_value[:, 1] - self._orig_value[:, 0]
assert widths.shape == (len(sim.model.tendon_names),)
bounds_change = np.repeat(widths * self._relative_std, 2)
bounds_change = bounds_change.reshape(len(sim.model.tendon_names), 2)
bounds_change *= self.unwrapped._random_state.randn(
len(sim.model.tendon_names), 2
)
new_tendon_ranges = self._orig_value.copy()
for tendon in sim.model.tendon_names:
tendon_id = sim.model.tendon_name2id(tendon)
lower, upper = new_tendon_ranges[tendon_id]
assert lower >= 0.0, "tendon range should have nonnegative lower bound"
assert upper >= 0.0, "tendon range should have nonnegative upper bound"
lower = max(0.0, lower + bounds_change[tendon_id][0])
upper = max(
lower + (widths[tendon_id] * 0.001), upper + bounds_change[tendon_id][1]
)
new_tendon_ranges[tendon_id][0] = lower
new_tendon_ranges[tendon_id][1] = upper
sim.model.tendon_range[:] = new_tendon_ranges.copy()
return OrderedDict([("tendon_range", new_tendon_ranges)])
def update_parameters(self, relative_std):
self._relative_std = relative_std
class RandomizedKpWrapper(RandomizedBodyWrapper):
def __init__(self, env=None, kp_range=[0.75, 1.5], actuator_names=[]):
self._kp_range = kp_range
self._actuator_names = actuator_names
super().__init__(env)
def _get_observation_space_delta(self, sim):
return OrderedDict([("actuator_kp", (len(self._actuator_names),))])
def _get_field(self, sim):
actuator_ids = [
sim.model.actuator_name2id(name) for name in self._actuator_names
]
return [sim.model.actuator_gainprm[idx, 0] for idx in actuator_ids]
def _set_field(self, sim):
actuator_ids = [
sim.model.actuator_name2id(name) for name in self._actuator_names
]
val = self._orig_value * loguniform(
self.unwrapped._random_state,
self._kp_range[0],
self._kp_range[1],
size=[len(actuator_ids)],
)
sim.model.actuator_gainprm[actuator_ids, 0] = val.copy()
return OrderedDict([("actuator_kp", val.copy())])
class ActionNoiseWrapper(gym.ActionWrapper):
def __init__(self, env=None, multiplicative=0.03, additive=0.03, uncorrelated=0.1):
super().__init__(env)
self._multiplicative = multiplicative
self._additive = additive
self._uncorrelated = uncorrelated
def reset(self, *args, **kwargs):
observation = self.env.reset(*args, **kwargs)
self._multiplicative_bias = (
1.0
+ self.unwrapped._random_state.randn(self.action_space.shape[0])
* self._multiplicative
)
self._additive_bias = (
self.unwrapped._random_state.randn(self.action_space.shape[0])
* self._additive
)
return observation
def action(self, action):
new_action = action * self._multiplicative_bias + self._additive_bias
new_action += (
self.unwrapped._random_state.randn(self.action_space.shape[0])
* self._uncorrelated
)
return new_action
def update_parameters(self, multiplicative, additive, uncorrelated):
self._multiplicative = multiplicative
self._additive = additive
self._uncorrelated = uncorrelated
class BacklashWrapper(gym.Wrapper):
"""
    Simulates backlash. coef controls how much backlash there is:
    coef=0 - there is infinite backlash
    coef=np.inf - backlash is ineffective. coef=exp(4.25) ~ 70 acts almost like np.inf.
    There is a different coefficient for the tendon pulling up vs the tendon pulling
    down (as there are two tendons). Both coef_down_log and coef_up_log are in log
    scale; the actual coefficients are np.exp(coef_down_log) and np.exp(coef_up_log).
Args:
- std (float): we sample coef_log per episode with this standard deviation.
"""
def __init__(self, env, std=0.1):
super().__init__(env)
self.coef_down_log = np.array(
[
4.25, # A_WRJ1
4.25, # A_WRJ0
2.93, # A_FFJ3
4.25, # A_FFJ2
4.25, # A_FFJ1
4.25, # A_MFJ3
4.25, # A_MFJ2
1.92, # A_MFJ1
4.25, # A_RFJ3
3.35, # A_RFJ2
4.25, # A_RFJ1
4.25, # A_LFJ4
4.25, # A_LFJ3
3.87, # A_LFJ2
1.39, # A_LFJ1
4.25, # A_THJ4
1.25, # A_THJ3
4.25, # A_THJ2
4.25, # A_THJ1
4.25,
] # A_THJ0
)
self.coef_up_log = np.array(
[
4.25, # A_WRJ1
4.25, # A_WRJ0
4.25, # A_FFJ3
4.25, # A_FFJ2
1.86, # A_FFJ1
4.25, # A_MFJ3
4.25, # A_MFJ2
1.44, # A_MFJ1
4.25, # A_RFJ3
2.98, # A_RFJ2
2.07, # A_RFJ1
4.25, # A_LFJ4
4.25, # A_LFJ3
2.94, # A_LFJ2
1.41, # A_LFJ1
2.82, # A_THJ4
1.53, # A_THJ3
4.25, # A_THJ2
2.86, # A_THJ1
2.10,
] # A_THJ0
)
self.slack = None
self.std = std
def reset(self, *args, **kwargs):
self.slack = np.zeros(len(self.env.unwrapped.sim.model.actuator_names))
ob = self.env.reset(*args, **kwargs)
rand = self.unwrapped._random_state
shape = self.coef_up_log.shape
self.episode_coef_down = np.exp(
self.coef_down_log * (1.0 + rand.randn(*shape) * self.std)
)
self.episode_coef_up = np.exp(
self.coef_up_log * (1.0 + rand.randn(*shape) * self.std)
)
# Otherwise, backlash is so huge that robot is useless.
self.episode_coef_down = np.maximum(self.episode_coef_down, 2.0)
self.episode_coef_up = np.maximum(self.episode_coef_up, 2.0)
return ob
def step(self, action):
# ctrl in space.
sim = self.env.unwrapped.sim
self.env.unwrapped._set_action(action)
ctrl = sim.data.ctrl
qpos_as_ctrl = self._qpos2ctrl(sim, sim.data.qpos)
# use kp and vel.
dt = sim.model.opt.timestep * sim.nsubsteps
diff = ctrl - qpos_as_ctrl
eps = 1e-5
incr = (diff < -eps) * diff * self.episode_coef_down * dt + (
diff > eps
) * diff * self.episode_coef_up * dt
alpha = np.abs(np.sign(diff) - self.slack) / (np.abs(incr) + 1e-12)
alpha = np.clip(alpha, 0.0, 1.0)
ctrl = alpha * qpos_as_ctrl + (1.0 - alpha) * ctrl
# Ensures that backlash behaves proportionally to elapsed time.
self.slack += incr
self.slack = np.clip(self.slack, -1.0, 1.0)
action = self._ctrl2action(
sim, ctrl, self.env.unwrapped.constants.relative_action
)
return self.env.step(action)
def _get_actuation_center(self, sim, ctrl, relative_action=False):
ctrlrange = sim.model.actuator_ctrlrange
if relative_action:
actuation_center = np.zeros_like(ctrl)
for i in range(sim.data.ctrl.shape[0]):
actuation_center[i] = sim.data.get_joint_qpos(
sim.model.actuator_names[i].replace(":A_", ":")
)
for joint_name in ["FF", "MF", "RF", "LF"]:
act_idx = sim.model.actuator_name2id("robot0:A_%sJ1" % joint_name)
actuation_center[act_idx] += sim.data.get_joint_qpos(
"robot0:%sJ0" % joint_name
)
else:
actuation_center = (ctrlrange[:, 1] + ctrlrange[:, 0]) / 2.0
return actuation_center
def _ctrl2action(self, sim, ctrl, relative_action=False):
ctrlrange = sim.model.actuator_ctrlrange
actuation_range = (ctrlrange[:, 1] - ctrlrange[:, 0]) / 2.0
actuation_center = self._get_actuation_center(sim, ctrl, relative_action)
action = (ctrl - actuation_center) / actuation_range
return action
def _qpos2ctrl(self, sim, qpos):
action = np.zeros(len(sim.model.actuator_names))
for act_idx, act_name in enumerate(sim.model.actuator_names):
joint_name = act_name.replace(":A_", ":")
jnt_idx = sim.model.get_joint_qpos_addr(joint_name)
action[act_idx] += qpos[jnt_idx]
for suffix in ["FFJ1", "MFJ1", "RFJ1", "LFJ1"]:
if suffix in joint_name:
jnt_idx = sim.model.get_joint_qpos_addr(
joint_name.replace("J1", "J0")
)
action[act_idx] += qpos[jnt_idx]
return action
def update_parameters(self, std):
self.std = std
class ActionDelayWrapper(gym.Wrapper):
"""
    Delay in milliseconds, with per-episode and per-step standard deviations.
"""
def __init__(
self,
env,
delay=30.0,
per_episode_std=0.1,
per_step_std=0.002,
random_state=None,
):
"""
:param env: Env to be wrapped.
        :param delay: Amount of delay in milliseconds.
        :param per_episode_std: Relative standard deviation of the per-episode delay.
        :param per_step_std: Relative standard deviation of the per-step delay.
        """
super().__init__(env)
self.delay = delay
self.per_episode_std = per_episode_std
self.per_step_std = per_step_std
self.nsubsteps = None # Default value of nsubsteps from a simulator
self.timestep = None # Default value of timestep from a simulator.
self.total_length_ms = None # How long takes the entire step.
self.last_action = None
self.random_state = random_state or self.unwrapped._random_state
self.reset()
def reset(self, *args, **kwargs):
self.last_action = None
sim = self.env.unwrapped.sim
random_normal = self.random_state.normal()
self.per_episode_delay = self.delay * (
1.0 + random_normal * self.per_episode_std
)
if self.nsubsteps is None or self.timestep is None:
self.nsubsteps = sim.nsubsteps
self.timestep = sim.model.opt.timestep
self.total_length_ms = self.timestep * self.nsubsteps * 1000
return self.env.reset(*args, **kwargs)
def step(self, action):
if self.last_action is None:
self.last_action = action.copy()
sim = self.env.unwrapped.sim
random_normal = self.random_state.normal()
delay = self.per_episode_delay * (1.0 + random_normal * self.per_step_std)
if delay > 1e-4:
delay = self._clip_delay(delay)
self._set_delay(delay)
self.env.step(self.last_action.copy()) # This step takes 'delay' time
else:
delay = 0.0
remaining_delay = self.total_length_ms - delay
self._set_delay(self._clip_delay(remaining_delay))
obs = self.env.step(action) # This step takes 'normal length' - delay time.
self.last_action = action.copy()
# Set back values.
sim.nsubsteps = self.nsubsteps
sim.model.opt.timestep = self.timestep
return obs
def _clip_delay(self, delay):
delay = max(0.05 * self.total_length_ms, delay)
delay = min(self.total_length_ms, delay)
return delay
def _set_delay(self, delay):
"""
        Sets nsubsteps and timestep so that one step takes 'delay' milliseconds.
"""
sim = self.env.unwrapped.sim
delay_nsubsteps = int(delay / self.timestep / 1000)
assert delay_nsubsteps >= 1, "This delay cannot be modeled within step."
delay_timestep = delay / (delay_nsubsteps * 1000)
assert np.abs((delay_nsubsteps * delay_timestep * 1000 - delay) / delay) < 1e-3
sim.nsubsteps = delay_nsubsteps
sim.model.opt.timestep = delay_timestep
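    # e.g. delay=30 ms with timestep=0.002 s gives delay_nsubsteps = 15 and
    # delay_timestep = 30 / (15 * 1000) = 0.002 s, so the inner step spans 30 ms.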
def update_parameters(self, delay, per_episode_std, per_step_std):
self.delay = delay
self.per_episode_std = per_episode_std
self.per_step_std = per_step_std
class ObservationDelayWrapper(gym.ObservationWrapper):
"""
Wrapper to simulate observation delay which is defined as
delay between true observation timestamp and the timestamp
when observation is obtained and used to calculate action.
"""
class Interpolator:
def interpolate(self, x1, x2, t):
raise NotImplementedError
class LinearInterpolator(Interpolator):
def interpolate(self, x1, x2, t):
assert 0 <= t <= 1
return x1 * t + x2 * (1 - t)
class QuatInterpolator(Interpolator):
def interpolate(self, x1, x2, t):
return quat_average([x1, x2], [t, 1 - t])
class RadianInterpolator(Interpolator):
def interpolate(self, x1, x2, t):
assert 0 <= t <= 1
diff = normalize_angles(x2 - x1)
return normalize_angles(x2 - t * diff)
def __init__(self, env, levels):
"""
:param env: Env to be wrapped.
:param levels: Delay levels for each observation. Example structure is:
            {
                "interpolators": {
                    "cube_quat": "QuatInterpolator"
                },
                "groups": {
                    "vision": {
                        # Group of observations to which the same delay will be applied.
                        "obs_names": ["cube_pos", "cube_quat"],
                        # mean for delay in number of steps.
                        "mean": 2,
                        # std for delay in number of steps.
                        "std": 1,
                    },
                    "giiker": {
                        "obs_names": ["cube_face_angle"],
                        "mean": 3,
                        "std": 1.5,
                    }
                }
            }
"""
super().__init__(env)
self.groups = levels["groups"]
self.interpolators = {
obs_name: getattr(self, interpolator)()
for obs_name, interpolator in levels["interpolators"].items()
}
new_spaces = self.env.observation_space.spaces.copy()
for name in self.group_names:
new_spaces.update(
{
f"noisy_{k}": self.env.observation_space.spaces[k]
for k in self.groups[name]["obs_names"]
}
)
self.observation_space = Dict(new_spaces)
self.default_interpolator = self.LinearInterpolator()
self.random_state = self.unwrapped._random_state
self.prev_obs = deque(maxlen=10)
@property
def group_names(self):
return sorted(self.groups.keys())
def reset(self, *args, **kwargs):
obs = self.env.reset(*args, **kwargs)
self.prev_obs.clear()
return self.observation(obs)
def observation(self, observation):
self.prev_obs.append(observation)
obs = observation.copy()
for name in self.group_names:
group = self.groups[name]
delay = self.random_state.normal(group["mean"], group["std"])
delay = np.clip(delay, 0.0, len(self.prev_obs) - 1)
delay_l = math.floor(delay)
delay_h = math.ceil(delay)
t = delay - delay_l
obs_l = self.prev_obs[-1 - delay_l]
obs_h = self.prev_obs[-1 - delay_h]
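            # e.g. a sampled delay of 1.3 steps blends the observation from
            # 1 step ago (weight 0.7) with the one from 2 steps ago (weight 0.3).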
for obs_name in group["obs_names"]:
new_obs_name = f"noisy_{obs_name}"
assert new_obs_name not in obs, (
f"Noisy value for {obs_name} already exists. Please make sure "
f"observation delay wrapper is applied before other observation "
f"noise wrappers."
)
interpolator = self._get_interpolator(obs_name)
obs[new_obs_name] = interpolator.interpolate(
obs_h[obs_name], obs_l[obs_name], t
)
return obs
def _get_interpolator(self, obs_name):
if obs_name in self.interpolators:
return self.interpolators[obs_name]
else:
return self.default_interpolator
def update_parameters(self, params):
for name, (mean, std) in params.items():
self.groups[name]["mean"] = mean
self.groups[name]["std"] = std
class RandomizedBrokenActuatorWrapper(gym.ActionWrapper):
def __init__(
self, env=None, proba_broken=0.001, max_broken_actuators=2, uncorrelated=0.05
):
"""We mark whether an actuator as broken at each reset.
The probability of all actuators being healthy is ~ 0.98 = (1-0.001) ** 20.
We fake the broken actuator effect by overwriting the action for that actuator to
0.0 + white noise.
Args:
- proba_broken (float): probability of one actuator being broken.
- max_broken_actuators (int): only this number of actuators can be broken at the same
time at maximum.
- uncorrelated (float): white noise on the zero action for broken actuators.
"""
super().__init__(env)
self._uncorrelated = uncorrelated
self._proba_broken = proba_broken
self._max_broken_actuators = max_broken_actuators
self._broken_aids = []
self._broken_action = 0.0
def reset(self, *args, **kwargs):
# Potentially we can change the default actions for broken actuators here.
observation = self.env.reset(*args, **kwargs)
n_actuators = len(self.unwrapped.sim.model.actuator_names)
self._broken_aids = [
i
for i in range(n_actuators)
if self.unwrapped._random_state.rand() < self._proba_broken
]
if len(self._broken_aids) > self._max_broken_actuators:
self._broken_aids = self.unwrapped._random_state.choice(
self._broken_aids, self._max_broken_actuators, replace=False
)
return observation
def action(self, action):
# in make_env(), relative_action=True by default.
new_action = action.copy()
for i in self._broken_aids:
white_noise = self.unwrapped._random_state.rand() * self._uncorrelated
new_action[i] = self._broken_action + white_noise
return new_action

# File: robogym-master/robogym/wrappers/face.py
from robogym.wrappers import randomizations
class RandomizedFaceDampingWrapper(randomizations.RandomizedDampingWrapper):
def __init__(self, env=None, damping_range=[1 / 3.0, 3.0], object_name="cube"):
joint_names = [
object_name + ":" + name for name in env.unwrapped.face_joint_names
]
super().__init__(env, damping_range, joint_names)

# File: robogym-master/robogym/wrappers/named_wrappers.py
import logging
from gym.wrappers import * # noqa # type: ignore
from .cube import * # noqa # type: ignore
from .dactyl import * # noqa # type: ignore
from .face import * # noqa # type: ignore
from .parametric import * # noqa # type: ignore
from .randomizations import * # noqa # type: ignore
from .util import * # noqa # type: ignore
logger = logging.getLogger(__name__)
def apply_named_wrappers(env, wrappers):
# lazy init to avoid import loop
# import all wrappers so that they can be referred to without qualification
for wrapper in wrappers:
wrapper_args = {} if (len(wrapper) == 1 or wrapper[1] is None) else wrapper[1]
logger.info("Adding Wrapper %s with args %s" % (wrapper[0], wrapper_args))
env = eval(wrapper[0])(env, **wrapper_args)
return env
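# Example (hypothetical wrapper list): each entry is [name] or [name, kwargs].
#
#   wrappers = [
#       ["RandomizedGravityWrapper", {"gravity_std": 0.4}],
#       ["ClipActionWrapper"],
#   ]
#   env = apply_named_wrappers(env, wrappers)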
def edit_wrappers(*, wrappers, insert_above=[], insert_below=[], replace=[], delete=[]):
"""
Edit list of wrappers with 4 operations. Order of operations is insert_above, insert_below, replace, delete.
Args:
    - insert_above (list): list of lists, where each item contains the name of the wrapper we are inserting above and the wrapper to insert.
        e.g. insert_above=[["RandomizedCubeSizeWrapper", ["RandomizedTimestepWrapper", wrapper_args_dict]], ...]
    - insert_below (list): same as insert_above except it inserts below
    - replace (list): same syntax as insert_above except it replaces the target wrapper
    - delete (list): list of wrapper names. e.g. delete=["RandomizedCubeSizeWrapper", "RandomizedTimestepWrapper"] to turn off those two wrappers
Returns: list of wrappers
"""
# Insert Above
for _insert_above in insert_above:
try:
ind = [wrapper[0] for wrapper in wrappers].index(_insert_above[0])
wrappers.insert(ind, _insert_above[1])
except ValueError:
logger.warning(_insert_above[0] + " not found in wrappers!!!")
assert False
# Insert Below
for _insert_below in insert_below:
try:
ind = [wrapper[0] for wrapper in wrappers].index(_insert_below[0]) + 1
wrappers.insert(ind, _insert_below[1])
except ValueError:
logger.warning(_insert_below[0] + " not found in wrappers!!!")
assert False
# Replace
for _replace in replace:
try:
ind = [wrapper[0] for wrapper in wrappers].index(_replace[0])
wrappers[ind] = _replace[1]
except ValueError:
logger.warning(_replace[0] + " not found in wrappers!!!")
assert False
# Delete
for _delete in delete:
try:
ind = [wrapper[0] for wrapper in wrappers].index(_delete)
wrappers.pop(ind)
except ValueError:
logger.warning(_delete + " not found in wrappers!!!")
assert False
return wrappers
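# Example (hypothetical edits): swap one randomization for another and drop a third.
#
#   wrappers = edit_wrappers(
#       wrappers=wrappers,
#       replace=[["RandomizedFrictionWrapper", ["RandomizedCubeFrictionWrapper", None]]],
#       delete=["RandomizedGravityWrapper"],
#   )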
def find_wrapper(env_top, search_string):
"""
recursively search for env wrapper containing the given string
:param env_top: top-level environment
:param search_string: (string) string to find in wrapper class name
:return: environment, (optional) stack of searched environments
"""
stack = []
curr_env = env_top
assert curr_env is not env_top.unwrapped
while search_string not in curr_env.class_name():
stack.append(curr_env)
curr_env = curr_env.env
assert curr_env is not env_top.unwrapped
assert search_string in curr_env.class_name()
return curr_env, stack

# File: robogym-master/robogym/wrappers/util.py
import enum
from collections import OrderedDict
from copy import deepcopy
import gym
import numpy as np
from gym.spaces import Box, Dict
def update_obs_space(env, delta):
spaces = env.observation_space.spaces.copy()
for key, shape in delta.items():
spaces[key] = Box(-np.inf, np.inf, (np.prod(shape),), np.float32)
return Dict(spaces)
class BinSpacing(enum.Enum):
"""
    An Enum class to generate action bin spacing arrays.
"""
LINEAR = "linear"
EXPONENTIAL = "exponential" # Exponential binning. Expects a symmetric action space centered around zero
def get_bin_array(self, lower_bound, upper_bound, n_bins) -> np.ndarray:
if self is BinSpacing.LINEAR:
return np.linspace(lower_bound, upper_bound, n_bins)
else:
assert (
lower_bound == -upper_bound and n_bins % 2 == 1
), "Exponential binning is only supported on symmetric action space with an odd number of bins"
half_range = np.array([2 ** (-n) for n in range(n_bins // 2)]) * lower_bound
return np.concatenate([half_range, [0], -half_range[::-1]])
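# e.g. BinSpacing.LINEAR.get_bin_array(-1.0, 1.0, 7) -> [-1, -2/3, -1/3, 0, 1/3, 2/3, 1],
# while BinSpacing.EXPONENTIAL.get_bin_array(-1.0, 1.0, 7) -> [-1, -0.5, -0.25, 0, 0.25, 0.5, 1].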
class DiscretizeActionWrapper(gym.ActionWrapper):
"""
A wrapper that maps a continuous gym action space into a discrete action space.
"""
# default action bins for DiscretizeActionWrapper
DEFAULT_BINS = 11
def __init__(
self, env=None, n_action_bins=DEFAULT_BINS, bin_spacing=BinSpacing.LINEAR
):
"""
n_action_bins: can be int or None
if None is passed, then DEFAULT_BINS will be used.
"""
super().__init__(env)
assert isinstance(env.action_space, Box)
self._disc_to_cont = []
if n_action_bins is None:
n_action_bins = self.DEFAULT_BINS
for low, high in zip(env.action_space.low, env.action_space.high):
self._disc_to_cont.append(
bin_spacing.get_bin_array(low, high, n_action_bins)
)
temp = [n_action_bins for _ in self._disc_to_cont]
self.action_space = gym.spaces.MultiDiscrete(temp)
self.action_space.seed(env.action_space.np_random.randint(0, 2 ** 32 - 1))
def action(self, action):
assert len(action) == len(self._disc_to_cont)
return np.array(
[m[a] for a, m in zip(action, self._disc_to_cont)], dtype=np.float32
)
class RewardNameWrapper(gym.Wrapper):
""" Sets the default reward name on the environment """
def __init__(self, env):
super().__init__(env)
unwrapped = self.env.unwrapped
if not hasattr(unwrapped, "reward_names"):
self.env.unwrapped.reward_names = ["env"]
def step(self, action):
return self.env.step(action)
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipObservationWrapper(gym.ObservationWrapper):
"""
Clips observations into a fixed range.
"""
def __init__(self, env=None, clip=100.0):
super().__init__(env)
self._clip = clip
def observation(self, observation):
clipped_observation = OrderedDict()
for key in observation:
clipped_observation[key] = np.clip(
observation[key], -self._clip, self._clip
)
return clipped_observation
def compute_relative_goals(self, *args, **kwargs):
self.env.compute_relative_goals(*args, **kwargs)
def compute_goal_reward(self, *args, **kwargs):
return self.env.compute_goal_reward(*args, **kwargs)
class ClipRewardWrapper(gym.RewardWrapper):
"""
Clips reward values into a fixed range.
"""
def __init__(self, env=None, clip=100.0):
super().__init__(env)
self._clip = clip
def reward(self, reward):
clipped_reward = np.clip(reward, -self._clip, self._clip)
return clipped_reward
def compute_relative_goals(self, *args, **kwargs):
self.env.compute_relative_goals(*args, **kwargs)
def compute_goal_reward(self, *args, **kwargs):
return self.env.compute_goal_reward(*args, **kwargs)
class ClipActionWrapper(gym.ActionWrapper):
""" Clips action values into a normalized space between -1 and 1"""
def action(self, action):
return np.clip(a=action, a_min=-1.0, a_max=1.0)
class IncrementalExpAvg(object):
""" A generic exponential moving average filter. """
    def __init__(self, alpha, initial_value=None):
self._value = 0
self._t = 0
self._alpha = alpha
        if initial_value is not None:
            self.update(initial_value)
def update(self, observation):
self._value = self._value * self._alpha + (1 - self._alpha) * observation
self._t += 1
def get(self):
if self._value is None:
return None
else:
return self._value / (1 - self._alpha ** self._t)
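    # e.g. with alpha=0.9, a single update(1.0) leaves _value = 0.1, and
    # get() = 0.1 / (1 - 0.9**1) = 1.0, so the estimate is unbiased at start-up.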
class PreviousActionObservationWrapper(gym.Wrapper):
"""
Wrapper that annotates observations with a cached previous action.
"""
def __init__(self, env=None):
super().__init__(env)
env.observation_space.spaces["previous_action"] = deepcopy(env.action_space)
def reset(self, *args, **kwargs):
self.previous_action = np.zeros(self.env.action_space.shape)
return self.observation(self.env.reset(*args, **kwargs))
def observation(self, observation):
observation["previous_action"] = self.previous_action.copy()
return observation
def step(self, action):
self.previous_action = action.copy()
ob, rew, done, info = self.env.step(action)
return self.observation(ob), rew, done, info
def compute_relative_goals(self, *args, **kwargs):
self.env.compute_relative_goals(*args, **kwargs)
def compute_goal_reward(self, *args, **kwargs):
return self.env.compute_goal_reward(*args, **kwargs)
class SmoothActionWrapper(gym.Wrapper):
"""
Applies smoothing to the current action using an Exponential Moving Average filter.
"""
def __init__(self, env, alpha=0.0):
super().__init__(env)
self._alpha = alpha
delta = OrderedDict([("action_ema", self.env.action_space.shape)])
self.observation_space = update_obs_space(self.env, delta)
def reset(self, *args, **kwargs):
obs = self.env.reset(*args, **kwargs)
sim = self.unwrapped.sim
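        # Rescale alpha so the effective smoothing time constant is independent of
        # the control timestep; 0.08 s appears to be the reference env-step duration
        # that the default alpha is calibrated against.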
adjusted_alpha = np.power(
self._alpha, (sim.model.opt.timestep * sim.nsubsteps) / 0.08
)
self._ema = IncrementalExpAvg(alpha=adjusted_alpha)
obs["action_ema"] = np.zeros(self.env.action_space.shape)
return obs
def step(self, action):
self._ema.update(action)
action = self._ema.get()
obs, rew, done, info = self.env.step(action)
obs["action_ema"] = action
return obs, rew, done, info
class RelativeGoalWrapper(gym.ObservationWrapper):
"""
Wrapper that computes the 'relative goal' and 'achieved goal' observations for
environments.
"""
def __init__(self, env, obs_prefix=""):
# Prefix to map goal observation to state observation. This is a hack to
# handle inconsistent naming convention for cube environment observations
# e.g. goal_pos goal observation maps to cube_pos state observation.
self.obs_prefix = obs_prefix
super().__init__(env)
self.goal_obs_names = []
delta = OrderedDict()
for name, space in self.env.observation_space.spaces.items():
if name.startswith("goal_"):
delta[f"achieved_{name}"] = space.shape
delta[f"relative_{name}"] = space.shape
delta[f"noisy_achieved_{name}"] = space.shape
delta[f"noisy_relative_{name}"] = space.shape
obs_name = name[len("goal_"):]
assert (
f"{self.obs_prefix}{obs_name}" in self.env.observation_space.spaces
), (
f"Found {name} but not {self.obs_prefix}{obs_name} in observation space. "
f"RelativeGoalWrapper won't work. Available observation space: "
f"{sorted(self.env.observation_space.spaces.keys())}"
)
self.goal_obs_names.append(obs_name)
self.observation_space = update_obs_space(self.env, delta)
def observation(self, observation):
""" Calculate 'relative goal' and 'achieved goal' """
current_state = {
f"{self.obs_prefix}{n}": observation[f"{self.obs_prefix}{n}"]
for n in self.goal_obs_names
}
noisy_goal_state = {
f"{self.obs_prefix}{n}": observation[f"noisy_{self.obs_prefix}{n}"]
for n in self.goal_obs_names
}
relative_goal = self.env.unwrapped.goal_generation.relative_goal(
self.env.unwrapped._goal, current_state
)
noisy_relative_goal = self.env.unwrapped.goal_generation.relative_goal(
self.env.unwrapped._goal, noisy_goal_state
)
for name in self.goal_obs_names:
obs_name = f"{self.obs_prefix}{name}"
observation[f"achieved_goal_{name}"] = observation[obs_name].copy()
observation[f"relative_goal_{name}"] = relative_goal[obs_name]
observation[f"noisy_achieved_goal_{name}"] = observation[
f"noisy_{obs_name}"
].copy()
observation[f"noisy_relative_goal_{name}"] = noisy_relative_goal[obs_name]
return observation
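# Illustrative naming example (a sketch, names assumed): with obs_prefix="cube_",
# a "goal_pos" observation is matched against the "cube_pos" state observation, and
# the wrapper adds "achieved_goal_pos", "relative_goal_pos" and their noisy variants.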
class UnifiedGoalObservationWrapper(gym.ObservationWrapper):
"""Concatenates the pieces of every goal type"""
def __init__(
self, env, goal_keys=["relative_goal", "achieved_goal", "goal"], goal_parts=[],
):
super().__init__(env)
self.delta = OrderedDict()
for goal_key in goal_keys:
goal_len = sum(
[
self.observation_space.spaces[key].shape[0]
for key in self.observation_space.spaces.keys()
if key.startswith(goal_key)
]
)
self.delta[goal_key] = (goal_len,)
if any(
[
key.startswith("noisy_" + goal_key + "_")
for key in self.observation_space.spaces.keys()
]
):
self.delta["noisy_" + goal_key] = (goal_len,)
self.goal_parts = goal_parts
self.observation_space = update_obs_space(self.env, self.delta)
def observation(self, observation):
new_obs = OrderedDict()
for key, value in observation.items():
new_obs[key] = value
        # It's a bit hacky to hard-code observation keys here, but we have to do it
        # for now to keep old policies backward compatible by keeping the observation
        # order the same.
for goal_key in self.delta.keys():
goal_parts = [goal_key + "_" + part for part in self.goal_parts]
goal = np.concatenate(
[observation[key] for key in goal_parts if key in observation]
)
new_obs[goal_key] = goal
return new_obs
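# Illustrative example (names assumed): with goal_parts=["pos", "quat"], the wrapper
# emits new_obs["relative_goal"] = concat(obs["relative_goal_pos"], obs["relative_goal_quat"]),
# and likewise for the other goal_keys (plus their "noisy_" variants when present).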
class SummedRewardsWrapper(gym.RewardWrapper):
"""
Ensures that reward is a scalar.
"""
def reward(self, reward):
return np.sum([reward])
| 11,436 | 32.247093 | 109 | py |
robogym | robogym-master/robogym/wrappers/dactyl.py | from collections import OrderedDict
import gym
import numpy as np
from robogym.robot.shadow_hand.hand_forward_kinematics import (
FINGERTIP_SITE_NAMES,
REFERENCE_SITE_NAMES,
)
from robogym.utils.sensor_utils import check_occlusion, occlusion_markers_exist
from robogym.wrappers import randomizations
class RandomizedPhasespaceFingersWrapper(randomizations.RandomizedBodyWrapper):
def __init__(self, env=None, fingertips_noise=0.003, reference_noise=0.001):
"""Randomize position of phasespace markers on fingers. Units in meters."""
super().__init__(env)
self._all_sites = [
(f"robot0:{name}", fingertips_noise) for name in FINGERTIP_SITE_NAMES
]
self._all_sites += [
(f"robot0:{name}", reference_noise) for name in REFERENCE_SITE_NAMES
]
def _get_observation_space_delta(self, sim):
site_idxes = [
sim.model.site_name2id(f"robot0:{c}")
for c in FINGERTIP_SITE_NAMES + REFERENCE_SITE_NAMES
]
return OrderedDict(
[("randomized_phasespace", sim.model.site_pos[site_idxes, :].shape)]
)
def _get_field(self, sim):
orig_pos = [None for _ in self._all_sites]
for idx, (name, noise) in enumerate(self._all_sites):
sensor_idx = sim.model.site_name2id(name)
orig_pos[idx] = sim.model.site_pos[sensor_idx, :].copy()
return np.array(orig_pos)
def _set_field(self, sim):
randomized_phasespace = []
for idx, (name, noise) in enumerate(self._all_sites):
sensor_idx = sim.model.site_name2id(name)
sim.model.site_pos[sensor_idx, :] = self._orig_value[
idx
] + self.unwrapped._random_state.uniform(-noise, noise, size=(3,))
randomized_phasespace.append(sim.model.site_pos[sensor_idx, :])
randomized_phasespace = np.array(randomized_phasespace, copy=True)
return OrderedDict([("randomized_phasespace_fingers", randomized_phasespace)])
class FingersOccludedPhasespaceMarkers(gym.ObservationWrapper):
    def __init__(self, env):
        """Make phasespace markers disappear when the occlusion detectors are in
        collision, which is simulated by freezing and returning the last observed
        phasespace values.
        This relies on `RandomizeObservationWrapper` with "fingertip_pos" in the
        input "levels".
        """
super().__init__(env)
self._key = "noisy_fingertip_pos"
self._n_markers = 5
self._obs_buffer = None
def reset(self, *args, **kwargs):
obs = self.env.reset(*args, **kwargs)
self._occlusion_markers_exist = occlusion_markers_exist(self.unwrapped.sim)
assert len(obs[self._key]) % 3 == 0
assert len(obs[self._key]) // 3 == self._n_markers
self._obs_buffer = obs[self._key].copy()
return obs
def observation(self, observation):
if not self._occlusion_markers_exist:
return observation
else:
new_observation = OrderedDict()
for key in observation:
new_observation[key] = observation[key]
# Freeze the fingertip_pos read if the finger is occluded.
is_occluded_list = check_occlusion(self.unwrapped.sim)
for i, is_occluded in enumerate(is_occluded_list):
if not is_occluded:
self._obs_buffer[3 * i: 3 * (i + 1)] = observation[self._key][
3 * i: 3 * (i + 1)
]
new_observation[self._key] = self._obs_buffer.copy()
self._obs_buffer = new_observation[self._key].copy()
return new_observation
class FingersFreezingPhasespaceMarkers(randomizations.FreezingPhasespaceMarkers):
def __init__(
self,
env=None,
key="noisy_fingertip_pos",
disappear_p_1s=0.2,
freeze_scale_s=1.0,
):
super().__init__(
env, key=key, disappear_p_1s=disappear_p_1s, freeze_scale_s=freeze_scale_s
)
class FingerSeparationWrapper(gym.Wrapper):
    """ Immobilize and separate all fingers other than the active finger. """
def __init__(self, env, active_finger):
super().__init__(env)
self.active_finger = active_finger
self.FINGERS = ("TH", "FF", "MF", "RF", "LF", "WR")
def reset(self, *args, **kwargs):
# Spreads fingers apart
finger_i = self.FINGERS.index(self.active_finger)
for i in range(len(self.FINGERS)):
if "F" in self.FINGERS[i] and i != finger_i:
if i < finger_i:
limit = 0
elif i > finger_i:
limit = 1
self._freeze_joint("{}J4".format(self.FINGERS[i]), 1)
self._freeze_joint("{}J3".format(self.FINGERS[i]), limit)
self._freeze_joint("{}J2".format(self.FINGERS[i]), 1)
self._freeze_joint("{}J1".format(self.FINGERS[i]), 1)
self._freeze_joint("{}J0".format(self.FINGERS[i]), 1)
if "TH" in self.FINGERS[i] and i != finger_i:
self._freeze_joint("{}J4".format(self.FINGERS[i]), 0)
self._freeze_joint("{}J3".format(self.FINGERS[i]), 1)
self._freeze_joint("{}J2".format(self.FINGERS[i]), 1)
self._freeze_joint("{}J1".format(self.FINGERS[i]), 0)
self._freeze_joint("{}J0".format(self.FINGERS[i]), 0)
return self.env.reset(*args, **kwargs)
def _freeze_joint(self, joint_name, limit):
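        # Collapse the joint's range to a ~0.01 rad window: overwriting bound
        # `limit` with (the other bound +/- 0.01) pins the joint near the other bound.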
if limit == 0:
diff = -0.01
else:
diff = 0.01
model = self.env.unwrapped.sim.model
if "robot0:" + joint_name in model.joint_names:
joint_id = model.joint_name2id("robot0:" + joint_name)
model.jnt_range[joint_id, limit] = (
model.jnt_range[joint_id, 1 - limit] + diff
)
class RandomizedRobotDampingWrapper(randomizations.RandomizedDampingWrapper):
def __init__(self, env=None, damping_range=[1 / 1.5, 1.5], robot_name="robot0"):
joint_names = [
name
for name in env.unwrapped.sim.model.joint_names
if name.startswith(robot_name + ":")
]
super().__init__(env, damping_range, joint_names)
class RandomizedRobotKpWrapper(randomizations.RandomizedKpWrapper):
def __init__(self, env=None, kp_range=[0.5, 2.0], robot_name="robot0"):
actuator_names = [
name
for name in env.unwrapped.sim.model.actuator_names
if name.startswith(robot_name + ":")
]
super().__init__(env, kp_range, actuator_names)
class FixedWristWrapper(gym.Wrapper):
def __init__(self, env=None, wrj0_pos=0.0):
self.wrj0_pos = wrj0_pos
super().__init__(env)
def reset(self, *args, **kwargs):
return self.env.reset(*args, **kwargs)
def step(self, action):
a_wrj0_id = self.env.unwrapped.sim.model.actuator_name2id("robot0:A_WRJ0")
ctrlrange = self.env.unwrapped.sim.model.actuator_ctrlrange[a_wrj0_id]
actuation_range = (ctrlrange[1] - ctrlrange[0]) / 2.0
joint_pos = self.env.unwrapped.sim.data.get_joint_qpos("robot0:WRJ0")
action[a_wrj0_id] = (self.wrj0_pos - joint_pos) / actuation_range
return self.env.step(action)
class RewardObservationWrapper(gym.Wrapper):
def __init__(self, env=None, reward_inds=None):
super().__init__(env)
self.reward_inds = reward_inds
self.shape = (len(reward_inds),) if reward_inds is not None else (1,)
env.observation_space.spaces["reward"] = gym.spaces.Box(
low=-np.inf, high=np.inf, shape=self.shape, dtype=np.float32
)
def reset(self, *args, **kwargs):
obs = self.env.reset(*args, **kwargs)
return self.observation(obs, None)
def observation(self, observation, reward):
observation["reward"] = self._reward_obs(reward)
return observation
def step(self, action):
ob, rew, done, info = self.env.step(action)
return self.observation(ob, rew), rew, done, info
def _reward_obs(self, reward):
if reward is None: # this should only be the case on reset
obs = np.zeros(self.shape)
else:
if (
self.reward_inds is None
): # This should only be the case when reward is a scalar
obs = np.array([reward])
else:
obs = np.array(reward[self.reward_inds])
return obs
DEFAULT_NOISE_LEVELS = {
"achieved_goal": {"uncorrelated": 0.001, "additive": 0.001},
}
| 8,676 | 37.22467 | 89 | py |
robogym | robogym-master/robogym/wrappers/tests/test_randomizations.py | import numpy as np
import pytest
from mock import patch
from numpy.testing import assert_almost_equal
from robogym.envs.dactyl.full_perpendicular import make_simple_env
from robogym.envs.dactyl.locked import make_env as make_env_locked
from robogym.envs.dactyl.reach import make_simple_env as make_reach_env
from robogym.mujoco.helpers import joint_qpos_ids_from_prefix
from robogym.utils import rotation
from robogym.utils.dactyl_utils import actuated_joint_range
from robogym.wrappers.dactyl import (
FingersFreezingPhasespaceMarkers,
FingersOccludedPhasespaceMarkers,
RandomizedPhasespaceFingersWrapper,
RandomizedRobotDampingWrapper,
RandomizedRobotKpWrapper,
)
from robogym.wrappers.randomizations import QUAT_NOISE_CORRECTION # noqa
from robogym.wrappers.randomizations import (
ActionDelayWrapper,
ActionNoiseWrapper,
BacklashWrapper,
ObservationDelayWrapper,
RandomizedActionLatency,
RandomizedBrokenActuatorWrapper,
RandomizedCubeFrictionWrapper,
RandomizedGravityWrapper,
RandomizedJointLimitWrapper,
RandomizedRobotFrictionWrapper,
RandomizedTendonRangeWrapper,
RandomizedTimestepWrapper,
RandomizeObservationWrapper,
)
VISUALIZE = False
def test_wrapper_divergence():
"""
    This test runs the same action in the vanilla dactyl_locked env and in envs wrapped with
    the given wrappers. After some steps, each wrapped env should diverge from the vanilla version.
"""
env_kwargs = {
"n_random_initial_steps": 0,
}
simple_env = make_simple_env(parameters=env_kwargs, starting_seed=0)
dummy_env = make_simple_env(
parameters=env_kwargs, starting_seed=0
) # should be exact same as `simple_env`
    # Add your wrappers here!
wrappers_to_test = [
(ActionNoiseWrapper, {}),
(BacklashWrapper, {}),
(FingersOccludedPhasespaceMarkers, {}), # Need 'noisy_fingertip_pos'
(FingersFreezingPhasespaceMarkers, {}), # Need 'noisy_fingertip_pos'
(
RandomizedBrokenActuatorWrapper,
{
"proba_broken": 1.0, # force one broken actuators
"max_broken_actuators": 1,
},
),
(RandomizedRobotFrictionWrapper, {}),
(RandomizedCubeFrictionWrapper, {}),
(RandomizedGravityWrapper, {}),
(RandomizedJointLimitWrapper, {}),
(RandomizedTendonRangeWrapper, {}),
(RandomizedPhasespaceFingersWrapper, {}),
(RandomizedRobotDampingWrapper, {}),
(RandomizedRobotKpWrapper, {}),
(RandomizedTimestepWrapper, {}),
(ActionDelayWrapper, {}),
# With default args, the maximum qpos difference is too small.
(RandomizedActionLatency, {"max_delay": 2}), # default 1
# (RandomizedBodyInertiaWrapper, {}), # default mass_range=[0.5, 1.5]
]
wrapped_envs = []
for wrapper_class, kwargs in wrappers_to_test:
env = make_simple_env(parameters=env_kwargs, starting_seed=0)
if wrapper_class in (
FingersOccludedPhasespaceMarkers,
FingersFreezingPhasespaceMarkers,
):
env = RandomizeObservationWrapper(
env=env,
levels={"fingertip_pos": {"uncorrelated": 0.002, "additive": 0.001}},
)
env = wrapper_class(env=env, **kwargs)
env.reset()
wrapped_envs.append(env)
for i in range(200):
action = np.ones(env.action_space.shape)
simple_env.step(action)
dummy_env.step(action)
for env in wrapped_envs:
env.step(action)
target_qpos_idxs = joint_qpos_ids_from_prefix(
simple_env.unwrapped.sim.model, "target:"
)
kept_indices = set(range(simple_env.unwrapped.sim.data.qpos.shape[0])) - set(
target_qpos_idxs
)
kept_indices = sorted(kept_indices)
def get_non_target_qpos(_env):
return np.array(_env.unwrapped.sim.data.qpos.copy()[kept_indices])
# Make sure the base env is deterministic
assert np.array_equal(
get_non_target_qpos(simple_env), get_non_target_qpos(dummy_env)
)
for env in wrapped_envs:
diffs = np.absolute(get_non_target_qpos(simple_env) - get_non_target_qpos(env))
assert np.max(diffs) > 1e-4, "failed for {}".format(env.__class__.__name__)
assert np.min(diffs) > 0.0, "failed for {}".format(env.__class__.__name__)
def test_randomize_obs_wrapper():
state = np.random.get_state()
try:
np.random.seed(1)
quat_noise_factor = QUAT_NOISE_CORRECTION
        # test that randomization of Euler angles and quaternions yields the same angular distance
n = 10000
a_bias = 0.1
additive_bias = a_bias * np.random.standard_normal(size=(n, 3))
# multiplicative bias does not make sense for random angles
angle = np.random.uniform(-np.pi, np.pi, size=(n, 3))
new_angle = angle + additive_bias
angle_dist = np.linalg.norm(rotation.subtract_euler(new_angle, angle), axis=-1)
angle = np.random.uniform(-np.pi, np.pi, size=(n, 1))
axis = np.random.uniform(-1.0, 1.0, size=(n, 3))
quat = rotation.quat_from_angle_and_axis(angle, axis)
# double the additive bias to roughly equal the angular distance
noise_angle = a_bias * quat_noise_factor * np.random.standard_normal(size=(n,))
noise_axis = np.random.uniform(-1.0, 1.0, size=(n, 3))
noise_quat = rotation.quat_from_angle_and_axis(noise_angle, noise_axis)
new_quat = rotation.quat_mul(quat, noise_quat)
quat_diff = rotation.quat_difference(quat, new_quat)
quat_dist = rotation.quat_magnitude(quat_diff)
mean_angle_dist = np.mean(angle_dist)
mean_quat_dist = np.mean(quat_dist)
assert ((mean_angle_dist - mean_quat_dist) / mean_angle_dist) < 0.01
finally:
np.random.set_state(state)
def test_randomize_observation_wrapper():
simple_env = make_simple_env()
simple_env.reset()
env = RandomizeObservationWrapper(
env=simple_env, levels={"cube_pos": {"uncorrelated": 0.2, "additive": 0.1}}
)
with patch.object(env, "random_state") as mock_rand:
# Remove randomness in the noise.
mock_rand.randn.side_effect = lambda key_length: np.ones(
key_length, dtype=np.float32
)
def mock_obs(o):
simple_env.observe = lambda: o
mock_obs({"cube_pos": np.array([0.1, 0.2, 0.3])})
obs = env.reset()
# Make sure noise is applied on noiseless value.
assert_almost_equal(obs["noisy_cube_pos"], [0.4, 0.5, 0.6])
mock_obs(
{
"cube_pos": np.array([0.1, 0.2, 0.3]),
"noisy_cube_pos": np.array([0.2, 0.3, 0.4]),
}
)
# Make sure noise is applied on top of noisy observation when available.
obs = env.reset()
assert_almost_equal(obs["noisy_cube_pos"], [0.5, 0.6, 0.7])
def test_observation_delay_wrapper():
levels = {
"interpolators": {
"cube_quat": "QuatInterpolator",
"cube_face_angle": "RadianInterpolator",
},
"groups": {
"vision": {
"obs_names": ["cube_pos", "cube_quat"],
"mean": 1.5,
"std": 0.0,
},
"giiker": {"obs_names": ["cube_face_angle"], "mean": 1.4, "std": 0.0},
"phasespace": {"obs_names": ["fingertip_pos"], "mean": 1.2, "std": 0.0},
},
}
simple_env = make_simple_env()
simple_env.reset()
env = ObservationDelayWrapper(simple_env, levels)
def mock_obs(o):
simple_env.observe = lambda: o
initial_obs = {
"cube_pos": np.array([0.1, 0.2, 0.3]),
"cube_quat": rotation.euler2quat(np.array([0.0, 0.0, 0.0])),
"cube_face_angle": np.array(
[np.pi - 0.01, np.pi / 2 - 0.01, 0.0, 0.0, 0.0, 0.0]
),
"fingertip_pos": np.array([0.5, 0.6, 0.7]),
}
mock_obs(initial_obs)
env.reset()
second_obs = {
"cube_pos": np.array([0.2, 0.3, 0.4]),
"cube_quat": rotation.euler2quat(np.array([0.8, 0.0, 0.0])),
"cube_face_angle": np.array(
[-np.pi + 0.01, np.pi / 2 + 0.01, 0.0, 0.0, 0.0, 0.0]
),
"fingertip_pos": np.array([0.5, 0.6, 0.7]),
}
mock_obs(second_obs)
obs = env.step(np.zeros(env.action_space.shape))[0]
# Should take the first observation because there are only two observations and nothing
# to interpolate.
for key in initial_obs:
assert_almost_equal(obs[f"noisy_{key}"], initial_obs[key])
# Step env again so obs should be interpolation of initial and second obs.
obs = env.step(np.zeros(env.action_space.shape))[0]
assert_almost_equal(obs["noisy_cube_pos"], [0.15, 0.25, 0.35])
assert_almost_equal(rotation.quat2euler(obs["noisy_cube_quat"]), [0.4, 0.0, 0.0])
assert_almost_equal(
obs["noisy_cube_face_angle"],
[-np.pi + 0.002, np.pi / 2 + 0.002, 0.0, 0.0, 0.0, 0.0],
)
assert_almost_equal(obs["noisy_fingertip_pos"], [0.5, 0.6, 0.7])
def test_observation_wrapper_order():
# Test to make sure observation noise wrappers are applied in correct order.
simple_env = make_simple_env()
simple_env.reset()
simple_env.observe = lambda: {"cube_pos": np.array([0.1, 0.2, 0.3])}
env = RandomizeObservationWrapper(
env=simple_env, levels={"cube_pos": {"uncorrelated": 0.2, "additive": 0.1}}
)
env.reset()
env = ObservationDelayWrapper(
env,
levels={
"interpolators": {},
"groups": {
"vision": {"obs_names": ["cube_pos"], "mean": 1.5, "std": 0.0},
},
},
)
with pytest.raises(AssertionError):
env.step(np.zeros(env.action_space.shape))
@pytest.mark.skip(reason="This test needs to be updated to work properly.")
def test_randomized_joint_range_wrapper_subset():
selected = [
"robot0:WRJ1",
"robot0:FFJ2",
"robot0:FFJ1",
"robot0:FFJ0",
"robot0:MFJ1",
"robot0:MFJ0",
"robot0:THJ2",
"robot0:THJ0",
]
env0 = make_reach_env()
env0.reset()
orig_sim_limits = actuated_joint_range(env0.unwrapped.sim)
env1 = make_reach_env()
env1 = RandomizedJointLimitWrapper(env=env1, joint_names=selected, relative_std=0.3)
env1.reset()
for _ in range(5):
env1.reset()
rand_sim_limits = actuated_joint_range(env1.unwrapped.sim)
for i, jnt_name in enumerate(env1.unwrapped.sim.model.joint_names):
low, high = orig_sim_limits[i]
if jnt_name not in selected:
assert low == rand_sim_limits[i][0] and high == rand_sim_limits[i][1]
else:
assert (low != 0.0) or rand_sim_limits[i][0] >= 0.0
assert (high != 0.0) or rand_sim_limits[i][1] <= 0.0
def test_randomized_broken_actuator_wrapper():
env = make_simple_env()
env.reset()
env = RandomizedBrokenActuatorWrapper(
env=env, proba_broken=0.5, max_broken_actuators=4, uncorrelated=0.0
)
env.reset()
assert len(env._broken_aids) <= 4
# The broken actuators are different after reset.
orig_broken_aids = env._broken_aids.copy()
env.reset()
assert sorted(env._broken_aids) != sorted(orig_broken_aids)
# The action is modified
action = env.action(np.ones(env.action_space.shape)).copy()
for i in range(env.action_space.shape[0]):
if i in env._broken_aids:
assert action[i] == 0.0
else:
assert action[i] == 1.0
def test_replace_cube_obs_vision_wrapper():
# Disabled for now until new models are trained
vision_args = {
"vision_model_path": "projects/vision/experiments/gan-muj-100x100/20180109_18_41/",
}
env = make_env_locked(constants={"randomize": False, "vision_args": vision_args})
env.reset()
env.step(env.action_space.nvec // 2)
def test_action_delay_wrapper_inactive():
env = make_simple_env(starting_seed=0)
env.reset()
# Wrapper calls reset in its __init__ so no need to
# call reset explicitly.
delayed_env = ActionDelayWrapper(
make_simple_env(starting_seed=0),
delay=0.0,
per_episode_std=0.0,
per_step_std=0.0,
random_state=np.random.RandomState(),
)
action = env.action_space.sample()
for _ in range(20):
ob_env, _, _, _ = env.step(action)
ob_delayed_env, _, _, _ = delayed_env.step(action)
for name in ob_env:
assert (
np.mean(np.abs(ob_env[name] - ob_delayed_env[name])) < 1e-6
), "ActionDelayWrapper should be inactive."
| 12,783 | 31.779487 | 97 | py |
robogym | robogym-master/robogym/wrappers/tests/test_dactyl.py | import numpy as np
from mock import patch
from robogym.envs.dactyl.locked import make_simple_env
from robogym.wrappers.dactyl import FingersOccludedPhasespaceMarkers
from robogym.wrappers.randomizations import RandomizeObservationWrapper
@patch("robogym.wrappers.dactyl.check_occlusion")
def test_fingers_occluded_phasespace_markers(mock_check_occlusion):
    # Test that when a finger is marked as occluded, the phasespace fingertip_pos stays
    # the same as the last reading.
fake_is_occluded = [0, 1, 0, 0, 1]
mock_check_occlusion.return_value = fake_is_occluded
env = make_simple_env()
env = RandomizeObservationWrapper(
env=env, levels={"fingertip_pos": {"uncorrelated": 0.002, "additive": 0.001}}
)
env = FingersOccludedPhasespaceMarkers(env=env)
action_shape = env.unwrapped.action_space.shape
obs = env.reset()
fingertip_pos = obs["noisy_fingertip_pos"].reshape(5, 3)
for _ in range(20):
obs, _, _, _ = env.step(np.ones(action_shape))
new_fingertip_pos = obs["noisy_fingertip_pos"].reshape(5, 3)
for finger_idx in range(5):
if fake_is_occluded[finger_idx]:
assert (
new_fingertip_pos[finger_idx] == fingertip_pos[finger_idx]
).all()
else:
assert (
new_fingertip_pos[finger_idx] != fingertip_pos[finger_idx]
).all()
fingertip_pos = new_fingertip_pos.copy()
| 1,473 | 36.794872 | 88 | py |
robogym | robogym-master/robogym/wrappers/tests/test_action_wrappers.py | import numpy as np
from robogym.envs.rearrange.blocks import make_env
from robogym.wrappers.util import DiscretizeActionWrapper
class TestDiscretizeActionWrapper:
def test_linear_mapping(self):
n_bins = 11
env = make_env(apply_wrappers=False, constants=dict(n_action_bins=n_bins))
env = DiscretizeActionWrapper(env, n_action_bins=n_bins)
linear_bins = np.linspace(-1, 1, n_bins)
assert np.array_equal(
env._disc_to_cont, [linear_bins] * env.action_space.shape[0]
)
def test_exponential_mapping(self):
n_bins = 11
env = make_env(
apply_wrappers=False,
constants=dict(n_action_bins=n_bins, action_spacing="exponential"),
)
env = DiscretizeActionWrapper(
env, n_action_bins=n_bins, bin_spacing=env.constants.action_spacing
)
exp_bins = np.array(
[-1.0, -0.5, -0.25, -0.125, -0.0625, 0.0, 0.0625, 0.125, 0.25, 0.5, 1.0]
)
assert np.array_equal(
env._disc_to_cont, [exp_bins] * env.action_space.shape[0]
)
| 1,105 | 33.5625 | 84 | py |
robogym | robogym-master/robogym/utils/icp.py | # Copied from https://github.com/ClayFlannigan/icp/blob/master/icp.py
# ICP (Iterative Closest Point) is an algorithm for finding the optimal rotation
# matrix between two sets of point clouds. This file implements vanilla ICP using
# the Kabsch algorithm with nearest-neighbor matching.
# See https://en.wikipedia.org/wiki/Iterative_closest_point for more variants of
# the ICP algorithm.
import numpy as np
from sklearn.neighbors import NearestNeighbors
from robogym.utils.mesh import get_vertices_bounding_box
class ICP:
def __init__(self, target_points: np.ndarray, error_threshold: float):
"""
:param target_points: The target point cloud to match against.
:param error_threshold: The error threshold to trust ICP result. This is relative
to bounding box size of target point cloud.
"""
self.error_threshold = (
error_threshold * get_vertices_bounding_box(target_points)[-1]
)
self.target_points = target_points
self.knn = None
def compute(self, points):
"""
        Compute the optimal rotation matrix; returns None if the fit error is above the threshold.
"""
if self.knn is None:
self.knn = NearestNeighbors(n_neighbors=1)
self.knn.fit(self.target_points)
T, max_error = icp(
points, self.target_points, self.knn, max_iterations=5, tolerance=1e-6,
)
if max_error < self.error_threshold:
return T[:3, :3].T
else:
return None
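# Illustrative usage (a sketch; point clouds assumed to be Nx3 arrays):
#   icp = ICP(target_points, error_threshold=0.1)
#   R = icp.compute(observed_points)  # 3x3 rotation matrix, or None if the fit is poor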
def best_fit_transform(A, B):
"""
Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions
Input:
A: Nxm numpy array of corresponding points
B: Nxm numpy array of corresponding points
Returns:
T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B
R: mxm rotation matrix
t: mx1 translation vector
"""
assert A.shape == B.shape
# get number of dimensions
m = A.shape[1]
# translate points to their centroids
centroid_A = np.mean(A, axis=0)
centroid_B = np.mean(B, axis=0)
AA = A - centroid_A
BB = B - centroid_B
# rotation matrix
H = np.dot(AA.T, BB)
U, S, Vt = np.linalg.svd(H)
R = np.dot(Vt.T, U.T)
# special reflection case
if np.linalg.det(R) < 0:
Vt[m - 1, :] *= -1
R = np.dot(Vt.T, U.T)
# translation
t = centroid_B.T - np.dot(R, centroid_A.T)
# homogeneous transformation
T = np.identity(m + 1)
T[:m, :m] = R
T[:m, m] = t
return T, R, t
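# Sanity-check example (illustrative): best_fit_transform(A, A) returns R ~= identity
# and t ~= 0, since the centroids coincide and H = AA.T @ AA gives U == V in the SVD.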
def nearest_neighbor(src, neigh):
"""
    Find the nearest (Euclidean) neighbor in the target point set for each point in src
    Input:
        src: Nxm array of points
        neigh: a fitted sklearn NearestNeighbors index over the target points
    Output:
        distances: Euclidean distances of the nearest neighbor
        indices: target-point indices of the nearest neighbor
"""
distances, indices = neigh.kneighbors(src, return_distance=True)
return distances.ravel(), indices.ravel()
def icp(A, B, knn, init_pose=None, max_iterations=20, tolerance=0.001):
"""
The Iterative Closest Point method: finds best-fit transform that maps points A on to points B
Input:
A: Nxm numpy array of source mD points
        B: Nxm numpy array of destination mD points
init_pose: (m+1)x(m+1) homogeneous transformation
max_iterations: exit algorithm after max_iterations
tolerance: convergence criteria
Output:
T: final homogeneous transformation that maps A on to B
        max_error: maximum Euclidean distance (error) to the nearest neighbors
"""
# get number of dimensions
m = A.shape[1]
# make points homogeneous, copy them to maintain the originals
src = np.ones((m + 1, A.shape[0]))
dst = np.ones((m + 1, B.shape[0]))
src[:m, :] = np.copy(A.T)
dst[:m, :] = np.copy(B.T)
# apply the initial pose estimation
if init_pose is not None:
src = np.dot(init_pose, src)
prev_error = 0
max_error = 0
for i in range(max_iterations):
# find the nearest neighbors between the current source and destination points
distances, indices = nearest_neighbor(src[:m, :].T, knn)
# compute the transformation between the current source and nearest destination points
T, _, _ = best_fit_transform(src[:m, :].T, dst[:m, indices].T)
# update the current source
src = np.dot(T, src)
# check error
mean_error = np.mean(distances)
max_error = np.max(distances)
if np.abs(prev_error - mean_error) < tolerance:
break
prev_error = mean_error
# calculate final transformation
T, _, _ = best_fit_transform(A, src[:m, :].T)
return T, max_error
| 4,822 | 29.333333 | 113 | py |
robogym | robogym-master/robogym/utils/misc.py | from os.path import abspath, dirname, join
# This is the absolute path to the root directory for the robogym repo.
ROBOGYM_ROOT_PATH = abspath(join(dirname(__file__), ".."))
def robogym_path(*args):
"""
Returns an absolute path from a path relative to the robogym repository root directory.
"""
return join(ROBOGYM_ROOT_PATH, *args)
def pretty(vec, precision=3):
"""
Returns short, pretty version of a float vector.
"""
if vec is None or vec.shape[0] == 0:
return ""
ret = "["
max_entries = 6
for idx in range(vec.shape[0]):
        if idx < max_entries or idx > vec.shape[0] - max_entries:
if vec[idx] >= 0:
ret += " "
ret += ("%." + str(precision) + "f ") % vec[idx]
elif idx == max_entries:
ret += "... "
return ret[:-1] + "]"
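# Example (illustrative): pretty(np.array([0.1234, -1.0]), precision=2) -> "[ 0.12 -1.00]"
# (non-negative entries get a leading space so columns line up).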
| 841 | 27.066667 | 91 | py |
robogym | robogym-master/robogym/utils/dactyl_utils.py | # This function can't be removed yet. There are two places that still need it: DactylReachEnv and
# RandomizedJointLimitWrapper. The latter can't be changed until the old environments are refactored, and the first
# one relies on it for initialization. An additional refactor is needed to remove this util.
def actuated_joint_range(sim):
joint_limits = sim.model.jnt_range.copy()
for a_idx, name in enumerate(sim.model.actuator_names):
j_idx = sim.model.joint_names.index(name.replace("A_", ""))
actuated_limits = sim.model.actuator_ctrlrange[a_idx, :]
joint_limits[j_idx, 0] = max(joint_limits[j_idx, 0], actuated_limits[0])
joint_limits[j_idx, 1] = min(joint_limits[j_idx, 1], actuated_limits[1])
# avoid cases where limits cross
joint_limits[j_idx, 1] = max(joint_limits[j_idx, 0], joint_limits[j_idx, 1])
return joint_limits
| 888 | 58.266667 | 115 | py |
robogym | robogym-master/robogym/utils/testing.py | import numpy as np
def assert_dict_match(d1: dict, d2: dict, eps: float = 1e-6):
"""Assert if two dictionary variables are different.
:param eps: the threshold used when comparing two float values from dicts.
"""
assert sorted(d1.keys()) == sorted(d2.keys())
for k in d1:
assert isinstance(d1[k], type(d2[k])) # same type
if isinstance(d1[k], np.ndarray):
assert np.allclose(d1[k], d2[k], atol=eps)
elif isinstance(d1[k], (float, np.float32, np.float64)):
assert abs(d1[k] - d2[k]) < eps, f"{k}: {d1[k]} != {d2[k]}"
elif isinstance(d1[k], dict):
assert_dict_match(d1[k], d2[k])
else:
assert d1[k] == d2[k], f"{k}: {d1[k]} != {d2[k]}"
| 746 | 36.35 | 78 | py |
robogym | robogym-master/robogym/utils/parse_arguments.py | import glob
import os
from robogym.worldgen.parser.normalize import normalize_value
def parse_arguments(argv):
"""
    Takes a list of arguments and splits it into plain arguments and a
    dictionary built from arguments of the form key=value.
    Further, cleans the arguments (expands * and ~) and, when they refer
    to files, makes sure the files are local.
"""
argv = _expand_user_rewrite(argv)
argv = _expand_wildcard_rewrite(argv)
argv, kwargs = _extract_kwargs_rewrite(argv)
_eval_kwargs(kwargs)
names = argv
print("\nInferred:")
print("\tnames: %s" % " ".join(names))
print("\targuments: %s" % str(kwargs))
print("\n")
return names, kwargs
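# Illustrative usage (a sketch; the file name is made up):
#   parse_arguments(["play.py", "n=3", "debug=true"])
#   # -> (["play.py"], {"n": 3, "debug": True})
# Values go through normalize_value, so numbers and booleans are parsed from strings.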
def _expand_wildcard_rewrite(argv):
"""
:param argv: list of values
    :return: values after the rewrite. If an argument contains *, it is expanded to all matching files.
"""
ret = []
for arg in argv:
if "*" in arg:
new_name = glob.glob(arg)
assert len(new_name) > 0, (
'Couldn\'t find any expansion to the pattern "%s"' % arg
)
ret += new_name
else:
ret.append(arg)
return ret
def _expand_user_rewrite(argv):
"""
:param argv: list of values
    :return: values after the rewrite. If a value starts with ~, it is expanded to the home directory.
"""
ret = []
for arg in argv:
if arg[0] == "~":
arg = os.path.expanduser(arg)
ret.append(arg)
return ret
def _extract_kwargs_rewrite(argv):
"""
    Splits the list into dictionary-like arguments and remaining arguments.
    :param argv: list of values
    :return: arguments that don't look like key=value, and a dictionary with the remaining arguments.
"""
kwargs = {}
ret = []
for arg in argv:
if arg.find("=") > -1:
pos = arg.find("=")
key, value = arg[:pos], arg[pos + 1:]
kwargs[key] = normalize_value(value)
else:
ret.append(arg)
return ret, kwargs
def _eval_kwargs(kwargs):
"""
Evaluates values which are strings starting with `@`, e.g. "@[]" -> [].
:param kwargs: dictionary
:return: the same dictionary but with evaluated values
"""
for key, value in kwargs.items():
if isinstance(value, str) and value[0] == "@":
kwargs[key] = eval(value[1:])
| 2,347 | 25.681818 | 96 | py |
robogym | robogym-master/robogym/utils/sensor_utils.py | OCCLUSION_MARKERS = [
"robot0:ffocclusion",
"robot0:mfocclusion",
"robot0:rfocclusion",
"robot0:lfocclusion",
"robot0:thocclusion",
]
OCCLUSION_DIST_CUTOFF = -0.0001  # negative contact distance means the geoms have penetrated.
def occlusion_markers_exist(sim):
for marker in OCCLUSION_MARKERS:
if marker not in sim.model.geom_names:
return False
return True
def check_occlusion(sim, dist_cutoff=OCCLUSION_DIST_CUTOFF):
"""
Check whether there is any collision or contact with the finger occlusion detection
geoms (class = "D_Occlusion").
Given a finger occlusion geom, if there is a contact and the contact distance is smaller
than `dist_cutoff`, we consider it as "being occluded".
    Returns: a list of 5 binary values, indicating whether each finger (ff, mf, rf, lf, th) is occluded.
"""
target_geom_ids = [sim.model.geom_name2id(m) for m in OCCLUSION_MARKERS]
geom_ids_with_contact = set()
for i in range(sim.data.ncon):
contact = sim.data.contact[i]
if contact.dist < dist_cutoff:
geom1 = contact.geom1
geom2 = contact.geom2
geom_ids_with_contact.add(geom1)
geom_ids_with_contact.add(geom2)
return [int(g_id in geom_ids_with_contact) for g_id in target_geom_ids]
def recolor_occlusion_geoms(sim, robot_occlusion_data):
"""
Color the occlusion geoms differently according to whether the simulator and the
phasespace tracker matches.
"""
colormap = [
[0, 0, 0, 0.1], # transparent grey for both off
[1, 0, 0, 0.7], # red for robot not but sim occluded
[0, 0, 1, 0.7], # blue for robot occluded but sim not
[1, 1, 0, 1.0], # solid yellow for both occluded
]
sim_occlusion_data = check_occlusion(sim)
geom_ids = [sim.model.geom_name2id(m) for m in OCCLUSION_MARKERS]
for g_id, robot_occluded, sim_occluded in zip(
geom_ids, robot_occlusion_data, sim_occlusion_data
):
category = 2 * int(robot_occluded) + int(sim_occluded)
sim.model.geom_rgba[g_id] = colormap[category]
| 2,083 | 33.733333 | 94 | py |
robogym | robogym-master/robogym/utils/multi_goal_tracker.py | import logging
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
from numpy.random import RandomState
from robogym.mujoco.simulation_interface import SimulationInterface
from robogym.utils.env_utils import InvalidSimulationError
logger = logging.getLogger(__name__)
def _sample_new_goal(goal_sample_func, _obs, _done, _env_crash, info):
# A helper function for sampling a new goal with try-catch for handling
# InvalidSimulationError.
try:
_obs = goal_sample_func()
info["goal_reset"] = True
except InvalidSimulationError:
_done = True
_env_crash = True
return _obs, _done, _env_crash
class MultiGoalTracker:
def __init__(
self,
*,
mujoco_simulation: SimulationInterface,
reset_goal_generation_fn: Callable,
reset_goal_fn: Callable,
max_timesteps_per_goal=None,
min_timesteps_per_goal=0,
success_reward: float = 5.0,
successes_needed: int = 5,
success_pause_range_s: Tuple[float, float] = (0.0, 0.0),
max_steps_goal_unreachable: int = 10,
check_goal_reachable=False,
use_goal_distance_reward=True,
goal_types: Optional[Set[str]] = None,
random_state: Optional[RandomState] = None
):
"""
Stats tracker for multiple goals.
        :param mujoco_simulation: A mujoco simulation interface object.
:param max_timesteps_per_goal: How many gym steps we can make before giving up.
        :param min_timesteps_per_goal: How many gym steps a goal should persist for. This option is
            required to prevent resets from happening too frequently, which may slow down rollouts.
:param success_reward: Reward when one goal is successful.
:param successes_needed: Number of goals to achieve to consider the whole episode successful.
:param success_pause_range_s: Number of seconds to sample the amount of time that success
needs to stay in successful state to get the reward.
:param max_steps_goal_unreachable: Number of gym steps we can make before considering the
goal is unreachable.
:param check_goal_reachable: If true, check whether goal is reachable from current state.
:param use_goal_distance_reward: If true, use goal distance reward.
:param random_state: A numpy random state object.
"""
self.max_timesteps_per_goal = max_timesteps_per_goal
self.min_timesteps_per_goal = min_timesteps_per_goal
self.max_steps_goal_unreachable = max_steps_goal_unreachable
self.success_pause_range_s = success_pause_range_s
self.success_reward = success_reward
self.successes_needed = successes_needed
self.check_goal_reachable = check_goal_reachable
self.use_goal_distance_reward = use_goal_distance_reward
self.goal_types = goal_types if goal_types is not None else ["generic"]
self.mujoco_simulation = mujoco_simulation
self.reset_goal_generation_fn = reset_goal_generation_fn
self.reset_goal_fn = reset_goal_fn
if random_state is None:
self._random_state = RandomState()
else:
self._random_state = random_state
self.reset()
def _set_success_step_range(self) -> List[float]:
mj_sim = self.mujoco_simulation.mj_sim
env_step_duration = mj_sim.nsubsteps * mj_sim.model.opt.timestep
success_step_range = sorted(
[max(1, s / env_step_duration) for s in self.success_pause_range_s]
)
assert len(success_step_range) == 2
return success_step_range
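    # Worked example (illustrative): with model.opt.timestep = 0.002 and
    # nsubsteps = 10, one env step lasts 0.02 s, so a success_pause_range_s of
    # (0.1, 0.3) becomes a success-step range of [5, 15].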
def reset(self):
"""Reset the state of MultiGoalTracker for starting a new episode,
so the entire goal generation is reset.
"""
self._success_step_range = self._set_success_step_range()
self._success_steps_required = self._random_state.randint(
self._success_step_range[0], self._success_step_range[1] + 1
)
self._steps = 0
self._consecutive_steps_with_success = 0
self._consecutive_steps_with_goal_unreachable = 0
self._success_and_no_goal_reset = False
self._goals_so_far = 0
self._successes_so_far = 0
self._successes_so_far_by_goal_type = {k: 0 for k in self.goal_types}
self._steps_since_last_goal = 0
self._steps_by_goal_type = {k: 0 for k in self.goal_types}
self._trial_success = False
self._env_crash = False
self._sub_goal_is_successful = False
def reset_goal_steps(self):
"""Reset the stats for a new goal within one episode.
"""
self._goals_so_far += 1
self._steps_since_last_goal = 0
self._success_steps_required = self._random_state.randint(
self._success_step_range[0], self._success_step_range[1] + 1
)
self._consecutive_steps_with_success = 0
self._consecutive_steps_with_goal_unreachable = 0
def _steps_per_success(self, total_steps, unsuccessful_steps, successes) -> float:
# A helper function for computing avg steps per succeeded goals.
if successes > 0:
return float(total_steps - unsuccessful_steps) / successes
else:
return float(self.max_timesteps_per_goal)
def initial_info(self) -> Dict[str, Any]:
info: Dict[str, Any] = {}
info["trial_success"] = False
info["sub_goal_is_successful"] = False
info["sub_goal_type"] = "generic"
info["goals_so_far"] = 1
info["successes_so_far"] = 0
info["successes_so_far_by_goal_type"] = {k: 0 for k in self.goal_types}
info["steps_since_last_goal"] = 0
info["steps_by_goal_type"] = {k: 0 for k in self.goal_types}
info["goal_terminally_unreachable"] = False
info["steps_per_success"] = self.max_timesteps_per_goal
info["steps_per_success_by_goal_type"] = {
k: self.max_timesteps_per_goal for k in self.goal_types
}
info["env_crash"] = False
return info
def process(
self,
obs,
env_reward,
done,
info,
goal_distance_reward,
is_successful,
goal_info: dict,
):
assert isinstance(env_reward, float)
goal_reachable = goal_info.get("goal_reachable", True)
solved = goal_info.get("solved", False)
goal_type = goal_info.get("goal", {}).get("goal_type", "generic")
success_reward = 0.0
self._env_crash = False
self._trial_success = False
self._sub_goal_is_successful = False
self._steps += 1
self._steps_since_last_goal += 1
self._steps_by_goal_type[goal_type] += 1
if is_successful:
self._consecutive_steps_with_success += 1
else:
self._consecutive_steps_with_success = 0
if not goal_reachable:
self._consecutive_steps_with_goal_unreachable += 1
else:
self._consecutive_steps_with_goal_unreachable = 0
unreachable_state_persists = (
self._consecutive_steps_with_goal_unreachable
>= self.max_steps_goal_unreachable
)
if (
self._consecutive_steps_with_success >= self._success_steps_required
and not self._success_and_no_goal_reset
):
success_reward = self.success_reward
self._successes_so_far += 1
self._successes_so_far_by_goal_type[goal_type] += 1
self._success_and_no_goal_reset = True
self._sub_goal_is_successful = True
elif self._steps_since_last_goal >= self.max_timesteps_per_goal:
# Even if env is not done, the wrapper ends the episode
done = True
elif self.check_goal_reachable and unreachable_state_persists:
# If the goal is not reachable we reset goal generation state.
            obs, done, self._env_crash = _sample_new_goal(
self.reset_goal_generation_fn, obs, done, self._env_crash, info
)
if (
self._success_and_no_goal_reset
and self._steps_since_last_goal >= self.min_timesteps_per_goal
):
self._success_and_no_goal_reset = False
if self._successes_so_far >= self.successes_needed or solved:
# Get enough number of successes so it is time to end this episode.
done = True
self._trial_success = True
self._steps_since_last_goal = 0
else:
obs, done, self._env_crash = _sample_new_goal(
self.reset_goal_fn, obs, done, self._env_crash, info
)
goal_reward = goal_distance_reward if self.use_goal_distance_reward else 0.0
goal_reward -= goal_info.get("penalty", 0.0)
reward = [env_reward, goal_reward, success_reward]
info = self.update_info(info, goal_info)
return obs, reward, done, info
def update_info(self, info: dict, goal_info: dict) -> dict:
goal_type = goal_info.get("goal", {}).get("goal_type", "generic")
unreachable_state_persists = (
self._consecutive_steps_with_goal_unreachable
>= self.max_steps_goal_unreachable
)
# Extract this here since self.reset_goal() changes the counter. This wrapper is crazy.
info["consecutive_steps_with_success"] = self._consecutive_steps_with_success
info["sub_goal_is_successful"] = self._sub_goal_is_successful
info["sub_goal_type"] = goal_type
info["steps_since_last_goal"] = self._steps_since_last_goal
info["trial_success"] = self._trial_success
info["goals_so_far"] = self._goals_so_far
info["successes_so_far"] = self._successes_so_far
info["successes_so_far_by_goal_type"] = self._successes_so_far_by_goal_type
info["steps_by_goal_type"] = self._steps_by_goal_type
info["env_crash"] = self._env_crash
info["goal_terminally_unreachable"] = unreachable_state_persists
info["steps_per_success"] = self._steps_per_success(
self._steps, self._steps_since_last_goal, self._successes_so_far
)
info["steps_per_success_by_goal_type"] = {
goal: self._steps_per_success(
self._steps_by_goal_type[goal],
int(goal_type == goal) * self._steps_since_last_goal,
self._successes_so_far_by_goal_type[goal],
)
for goal in self._steps_by_goal_type
}
return info
| 10,623 | 37.215827 | 101 | py |
robogym | robogym-master/robogym/utils/rubik_utils.py | import kociemba
import pycuber
def solve_fast(cube, max_depth=24):
assert isinstance(cube, pycuber.Cube)
coloring = str(cube).replace("[", "").replace("]", "").replace(" ", " ")
coloring = coloring.split("\n")
seq = coloring[0].strip() + coloring[1].strip() + coloring[2].strip()
seq += coloring[3][6:9]
seq += coloring[4][6:9]
seq += coloring[5][6:9]
seq += coloring[3][3:6]
seq += coloring[4][3:6]
seq += coloring[5][3:6]
seq += coloring[6][3:6]
seq += coloring[7][3:6]
seq += coloring[8][3:6]
seq += coloring[3][:3]
seq += coloring[4][:3]
seq += coloring[5][:3]
seq += coloring[3][9:12]
seq += coloring[4][9:12]
seq += coloring[5][9:12]
seq = seq.replace("y", "U")
seq = seq.replace("g", "F")
seq = seq.replace("w", "D")
seq = seq.replace("r", "L")
seq = seq.replace("o", "R")
seq = seq.replace("b", "B")
return kociemba.solve(seq, max_depth=max_depth)
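# The slicing above rearranges pycuber's unfolded-net layout into kociemba's
# URFDLB facelet order, and the replace calls map sticker colors to face letters.
# Illustrative usage (assumes the standard pycuber color scheme):
#   cube = pycuber.Cube()
#   cube("R U R' U'")
#   solve_fast(cube)  # -> a kociemba move string that restores the cube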
| 964 | 29.15625 | 78 | py |
robogym | robogym-master/robogym/utils/rotation.py | # Many methods borrow heavily or entirely from transforms3d https://github.com/matthew-brett/transforms3d
# eventually some of these may be upstreamed, but credit to transforms3d
# authors for implementing the many of the formulations we use here.
import itertools
import numpy as np
"""
Rotations
=========
Note: these have caused many subtle bugs in the past.
Be careful while updating these methods and while using them in clever ways.
See MuJoCo documentation here: http://mujoco.org/book/modeling.html#COrientation
Conventions
-----------
- All functions accept batches as well as individual rotations
- All rotation conventions match respective MuJoCo defaults
- All angles are in radians
- Matrices follow LR convention
- Euler Angles are all relative with 'xyz' axes ordering
- See specific representation for more information
Representations
---------------
Euler
There are many euler angle frames -- here we will strive to use the default
in MuJoCo, which is eulerseq='xyz'.
This frame is a relative rotating frame, about x, y, and z axes in order.
Relative rotating means that after we rotate about x, then we use the
new (rotated) y, and the same for z.
Quaternions
These are defined in terms of rotation (angle) about a unit vector (x, y, z)
We use the following <q0, q1, q2, q3> convention:
q0 = cos(angle / 2)
q1 = sin(angle / 2) * x
q2 = sin(angle / 2) * y
q3 = sin(angle / 2) * z
This is also sometimes called qw, qx, qy, qz.
Note that quaternions are ambiguous, because we can represent a rotation by
angle about vector <x, y, z> and -angle about vector <-x, -y, -z>.
To choose between these, we pick "first nonzero positive", where we
make the first nonzero element of the quaternion positive.
    This can result in mismatches if you're converting a quaternion that is not
"first nonzero positive" to a different representation and back.
Axis Angle
(Not currently implemented)
These are very straightforward. Rotation is angle about a unit vector.
XY Axes
(Not currently implemented)
We are given x axis and y axis, and z axis is cross product of x and y.
Z Axis
This is NOT RECOMMENDED. Defines a unit vector for the Z axis,
but rotation about this axis is not well defined.
Instead pick a fixed reference direction for another axis (e.g. X)
and calculate the other (e.g. Y = Z cross-product X),
then use XY Axes rotation instead.
SO3
(Not currently implemented)
While not supported by MuJoCo, this representation has a lot of nice features.
We expect to add support for these in the future.
TODO / Missing
--------------
- Rotation integration or derivatives (e.g. velocity conversions)
- More representations (SO3, etc)
- Random sampling (e.g. sample uniform random rotation)
- Performance benchmarks/measurements
- (Maybe) define everything as to/from matrices, for simplicity
"""
# For testing whether a number is close to zero
_FLOAT_EPS = np.finfo(np.float64).eps
_EPS4 = _FLOAT_EPS * 4.0
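# Worked example of the quaternion convention above (illustrative): a 90 degree
# rotation about the z axis is <cos(pi/4), 0, 0, sin(pi/4)> ~= <0.7071, 0, 0, 0.7071>,
# which is what euler2quat(np.array([0.0, 0.0, np.pi / 2])) returns.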
def euler2mat(euler):
""" Convert Euler Angles to Rotation Matrix. See rotation.py for notes """
euler = np.asarray(euler, dtype=np.float64)
assert euler.shape[-1] == 3, "Invalid shaped euler {}".format(euler)
ai, aj, ak = -euler[..., 2], -euler[..., 1], -euler[..., 0]
si, sj, sk = np.sin(ai), np.sin(aj), np.sin(ak)
ci, cj, ck = np.cos(ai), np.cos(aj), np.cos(ak)
cc, cs = ci * ck, ci * sk
sc, ss = si * ck, si * sk
mat = np.empty(euler.shape[:-1] + (3, 3), dtype=np.float64)
mat[..., 2, 2] = cj * ck
mat[..., 2, 1] = sj * sc - cs
mat[..., 2, 0] = sj * cc + ss
mat[..., 1, 2] = cj * sk
mat[..., 1, 1] = sj * ss + cc
mat[..., 1, 0] = sj * cs - sc
mat[..., 0, 2] = -sj
mat[..., 0, 1] = cj * si
mat[..., 0, 0] = cj * ci
return mat
def euler2quat(euler):
""" Convert Euler Angles to Quaternions. See rotation.py for notes """
euler = np.asarray(euler, dtype=np.float64)
assert euler.shape[-1] == 3, "Invalid shape euler {}".format(euler)
ai, aj, ak = euler[..., 2] / 2, -euler[..., 1] / 2, euler[..., 0] / 2
si, sj, sk = np.sin(ai), np.sin(aj), np.sin(ak)
ci, cj, ck = np.cos(ai), np.cos(aj), np.cos(ak)
cc, cs = ci * ck, ci * sk
sc, ss = si * ck, si * sk
quat = np.empty(euler.shape[:-1] + (4,), dtype=np.float64)
quat[..., 0] = cj * cc + sj * ss
quat[..., 3] = cj * sc - sj * cs
quat[..., 2] = -(cj * ss + sj * cc)
quat[..., 1] = cj * cs - sj * sc
return quat
def mat2euler(mat):
""" Convert Rotation Matrix to Euler Angles. See rotation.py for notes """
mat = np.asarray(mat, dtype=np.float64)
assert mat.shape[-2:] == (3, 3), "Invalid shape matrix {}".format(mat)
cy = np.sqrt(mat[..., 2, 2] * mat[..., 2, 2] + mat[..., 1, 2] * mat[..., 1, 2])
condition = cy > _EPS4
euler = np.empty(mat.shape[:-1], dtype=np.float64)
euler[..., 2] = np.where(
condition,
-np.arctan2(mat[..., 0, 1], mat[..., 0, 0]),
-np.arctan2(-mat[..., 1, 0], mat[..., 1, 1]),
)
euler[..., 1] = np.where(
condition, -np.arctan2(-mat[..., 0, 2], cy), -np.arctan2(-mat[..., 0, 2], cy)
)
euler[..., 0] = np.where(
condition, -np.arctan2(mat[..., 1, 2], mat[..., 2, 2]), 0.0
)
return euler
def mat2quat(mat):
""" Convert Rotation Matrix to Quaternion. See rotation.py for notes """
mat = np.asarray(mat, dtype=np.float64)
assert mat.shape[-2:] == (3, 3), "Invalid shape matrix {}".format(mat)
Qxx, Qyx, Qzx = mat[..., 0, 0], mat[..., 0, 1], mat[..., 0, 2]
Qxy, Qyy, Qzy = mat[..., 1, 0], mat[..., 1, 1], mat[..., 1, 2]
Qxz, Qyz, Qzz = mat[..., 2, 0], mat[..., 2, 1], mat[..., 2, 2]
# Fill only lower half of symmetric matrix
K = np.zeros(mat.shape[:-2] + (4, 4), dtype=np.float64)
K[..., 0, 0] = Qxx - Qyy - Qzz
K[..., 1, 0] = Qyx + Qxy
K[..., 1, 1] = Qyy - Qxx - Qzz
K[..., 2, 0] = Qzx + Qxz
K[..., 2, 1] = Qzy + Qyz
K[..., 2, 2] = Qzz - Qxx - Qyy
K[..., 3, 0] = Qyz - Qzy
K[..., 3, 1] = Qzx - Qxz
K[..., 3, 2] = Qxy - Qyx
K[..., 3, 3] = Qxx + Qyy + Qzz
K /= 3.0
# TODO: vectorize this -- probably could be made faster
q = np.empty(K.shape[:-2] + (4,))
it = np.nditer(q[..., 0], flags=["multi_index"])
while not it.finished:
# Use Hermitian eigenvectors, values for speed
vals, vecs = np.linalg.eigh(K[it.multi_index])
# Select largest eigenvector, reorder to w,x,y,z quaternion
q[it.multi_index] = vecs[[3, 0, 1, 2], np.argmax(vals)]
# Prefer quaternion with positive w
# (q * -1 corresponds to same rotation as q)
if q[it.multi_index][0] < 0:
q[it.multi_index] *= -1
it.iternext()
return q
def quat2euler(quat):
""" Convert Quaternion to Euler Angles. See rotation.py for notes """
return mat2euler(quat2mat(quat))
def subtract_euler(e1, e2):
assert e1.shape == e2.shape
assert e1.shape[-1] == 3
q1 = euler2quat(e1)
q2 = euler2quat(e2)
q_diff = quat_mul(q1, quat_conjugate(q2))
return quat2euler(q_diff)
def quat2mat(quat):
    """ Convert Quaternion to Rotation Matrix. See rotation.py for notes """
quat = np.asarray(quat, dtype=np.float64)
assert quat.shape[-1] == 4, "Invalid shape quat {}".format(quat)
w, x, y, z = quat[..., 0], quat[..., 1], quat[..., 2], quat[..., 3]
Nq = np.sum(quat * quat, axis=-1)
s = 2.0 / Nq
X, Y, Z = x * s, y * s, z * s
wX, wY, wZ = w * X, w * Y, w * Z
xX, xY, xZ = x * X, x * Y, x * Z
yY, yZ, zZ = y * Y, y * Z, z * Z
mat = np.empty(quat.shape[:-1] + (3, 3), dtype=np.float64)
mat[..., 0, 0] = 1.0 - (yY + zZ)
mat[..., 0, 1] = xY - wZ
mat[..., 0, 2] = xZ + wY
mat[..., 1, 0] = xY + wZ
mat[..., 1, 1] = 1.0 - (xX + zZ)
mat[..., 1, 2] = yZ - wX
mat[..., 2, 0] = xZ - wY
mat[..., 2, 1] = yZ + wX
mat[..., 2, 2] = 1.0 - (xX + yY)
return np.where((Nq > _FLOAT_EPS)[..., np.newaxis, np.newaxis], mat, np.eye(3))
def quat_conjugate(q):
inv_q = -q
inv_q[..., 0] *= -1
return inv_q
def quat_mul(q0, q1):
assert q0.shape == q1.shape
assert q0.shape[-1] == 4
assert q1.shape[-1] == 4
w0 = q0[..., 0]
x0 = q0[..., 1]
y0 = q0[..., 2]
z0 = q0[..., 3]
w1 = q1[..., 0]
x1 = q1[..., 1]
y1 = q1[..., 2]
z1 = q1[..., 3]
w = w0 * w1 - x0 * x1 - y0 * y1 - z0 * z1
x = w0 * x1 + x0 * w1 + y0 * z1 - z0 * y1
y = w0 * y1 + y0 * w1 + z0 * x1 - x0 * z1
z = w0 * z1 + z0 * w1 + x0 * y1 - y0 * x1
q = np.array([w, x, y, z])
if q.ndim == 2:
q = q.swapaxes(0, 1)
assert q.shape == q0.shape
return q
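# Worked example (illustrative): composing two 90 degree z-rotations gives a 180
# degree z-rotation: quat_mul(q, q) with q = np.array([0.7071, 0.0, 0.0, 0.7071])
# returns approximately [0, 0, 0, 1].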
def quat_rot_vec(q, v0):
q_v0 = np.array([0, v0[0], v0[1], v0[2]])
q_v = quat_mul(q, quat_mul(q_v0, quat_conjugate(q)))
v = q_v[1:]
return v
def quat_identity():
return np.array([1, 0, 0, 0])
def quat_difference(q, p):
return quat_normalize(quat_mul(q, quat_conjugate(p)))
def quat_magnitude(q):
w = q[..., 0]
assert np.all(w >= 0)
return 2 * np.arccos(np.clip(w, -1.0, 1.0))
def quat_normalize(q):
assert q.shape[-1] == 4
sign = np.sign(q[..., [0]])
    # np.sign returns 0 when the input is 0, but here we want to treat that case as +1
sign[sign == 0] = 1
return q * sign # use quat with w >= 0
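# Note: quat_normalize canonicalizes the sign (w >= 0) rather than rescaling to unit
# length; q and -q represent the same rotation, so this picks one of the two forms.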
def quat_average(quats, weights=None):
"""Weighted average of a list of quaternions."""
n_quats = len(quats)
weights = np.array([1.0 / n_quats] * n_quats if weights is None else weights)
assert np.all(weights >= 0.0)
assert len(weights) == len(quats)
weights = weights / np.sum(weights)
# Average of quaternion:
# https://math.stackexchange.com/questions/1204228
# /how-would-i-apply-an-exponential-moving-average-to-quaternions
outer_prods = [w * np.outer(q, q) for w, q in zip(weights, quats)]
summed_outer_prod = np.sum(outer_prods, axis=0)
assert summed_outer_prod.shape == (4, 4)
evals, evecs = np.linalg.eig(summed_outer_prod)
evals, evecs = np.real(evals), np.real(evecs)
biggest_i = np.argmax(np.real(evals))
return quat_normalize(evecs[:, biggest_i])
def quat2axisangle(quat):
theta = 0
axis = np.array([0, 0, 1])
sin_theta = np.linalg.norm(quat[1:])
if sin_theta > 0.0001:
theta = 2 * np.arcsin(sin_theta)
theta *= 1 if quat[0] >= 0 else -1
axis = quat[1:] / sin_theta
return axis, theta
def euler2point_euler(euler):
_euler = euler.copy()
if len(_euler.shape) < 2:
_euler = np.expand_dims(_euler, 0)
assert _euler.shape[1] == 3
_euler_sin = np.sin(_euler)
_euler_cos = np.cos(_euler)
return np.concatenate([_euler_sin, _euler_cos], axis=-1)
def point_euler2euler(euler):
_euler = euler.copy()
if len(_euler.shape) < 2:
_euler = np.expand_dims(_euler, 0)
assert _euler.shape[1] == 6
angle = np.arctan(_euler[..., :3] / _euler[..., 3:])
angle[_euler[..., 3:] < 0] += np.pi
return angle
def quat2point_quat(quat):
# Should be in qw, qx, qy, qz
_quat = quat.copy()
if len(_quat.shape) < 2:
_quat = np.expand_dims(_quat, 0)
assert _quat.shape[1] == 4
angle = np.arccos(_quat[:, [0]]) * 2
xyz = _quat[:, 1:]
xyz[np.squeeze(np.abs(np.sin(angle / 2))) >= 1e-5] = (xyz / np.sin(angle / 2))[
np.squeeze(np.abs(np.sin(angle / 2))) >= 1e-5
]
return np.concatenate([np.sin(angle), np.cos(angle), xyz], axis=-1)
def point_quat2quat(quat):
_quat = quat.copy()
if len(_quat.shape) < 2:
_quat = np.expand_dims(_quat, 0)
assert _quat.shape[1] == 5
angle = np.arctan(_quat[:, [0]] / _quat[:, [1]])
qw = np.cos(angle / 2)
qxyz = _quat[:, 2:]
qxyz[np.squeeze(np.abs(np.sin(angle / 2))) >= 1e-5] = (qxyz * np.sin(angle / 2))[
np.squeeze(np.abs(np.sin(angle / 2))) >= 1e-5
]
return np.concatenate([qw, qxyz], axis=-1)
def normalize_angles(angles, low=-np.pi, high=np.pi):
"""Puts angles in [low, high] range."""
angles = angles.copy()
if angles.size > 0:
angles = np.mod(angles - low, high - low) + low
assert low - 1e-6 <= angles.min() and angles.max() <= high + 1e-6
return angles
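# Worked example (illustrative): normalize_angles(np.array([3 * np.pi / 2]))
# returns array([-pi/2]), since 3*pi/2 wrapped into [-pi, pi] is -pi/2.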
def round_to_straight_angles(angles):
    """Returns each angle rounded to the closest multiple of 90 degrees """
angles = np.round(angles / (np.pi / 2)) * (np.pi / 2)
return normalize_angles(angles)
def round_to_straight_quat(quat):
angles = quat2euler(quat)
rounded_angles = round_to_straight_angles(angles)
return euler2quat(rounded_angles)
def get_parallel_rotations():
mult90 = [0, np.pi / 2, -np.pi / 2, np.pi]
parallel_rotations = []
for euler in itertools.product(mult90, repeat=3):
canonical = mat2euler(euler2mat(euler))
canonical = np.round(canonical / (np.pi / 2))
if canonical[0] == -2:
canonical[0] = 2
if canonical[2] == -2:
canonical[2] = 2
canonical *= np.pi / 2
if all([(canonical != rot).any() for rot in parallel_rotations]):
parallel_rotations += [canonical]
assert len(parallel_rotations) == 24
return parallel_rotations
def get_parallel_rotations_180():
mult180 = [0, np.pi]
parallel_rotations = []
for euler in itertools.product(mult180, repeat=3):
canonical = mat2euler(euler2mat(euler))
canonical = np.round(canonical / (np.pi / 2))
if canonical[0] == -2:
canonical[0] = 2
if canonical[2] == -2:
canonical[2] = 2
canonical *= np.pi / 2
if all([(canonical != rot).any() for rot in parallel_rotations]):
parallel_rotations += [canonical]
assert len(parallel_rotations) == 4
return parallel_rotations
def quat_from_angle_and_axis(angle, axis):
assert axis.shape[-1] == 3
    # Normalize a copy so the caller's axis array is not mutated in place.
    axis = axis / np.linalg.norm(axis, axis=-1, keepdims=True)
angle = np.reshape(angle, axis[..., :1].shape)
w = np.cos(angle / 2.0)
v = np.sin(angle / 2.0) * axis
quat = np.concatenate([w, v], axis=-1)
quat /= np.linalg.norm(quat, axis=-1, keepdims=True)
assert np.array_equal(quat.shape[:-1], axis.shape[:-1])
return quat
def uniform_quat(random):
""" Returns a quaternion uniformly at random. Choosing a random axis/angle or even uniformly
random Euler angles will result in a biased angle rather than a spherically symmetric one.
See https://en.wikipedia.org/wiki/Rotation_matrix#Uniform_random_rotation_matrices for details.
"""
w = random.randn(4)
return quat_normalize(w / np.linalg.norm(w))
def apply_euler_rotations(base_quat, rotation_angles):
"""Apply a sequence of euler angle rotations on to the base quaternion
"""
new_rot_mat = np.eye(3)
for rot_angle in rotation_angles:
new_rot_mat = np.matmul(euler2mat(rot_angle * np.pi / 2.0), new_rot_mat)
new_rot_mat = np.matmul(quat2mat(base_quat), new_rot_mat)
new_quat = mat2quat(new_rot_mat)
return new_quat
def any_orthogonal(vec):
""" Return any (unit length) vector orthogonal to vec, in a numerically stable way """
promising_axis = np.eye(3)[np.abs(vec).argmin()]
non_unit_len = np.cross(vec, promising_axis)
return non_unit_len / np.linalg.norm(non_unit_len)
def vectors2quat(v_from, v_to):
""" Define a quaternion rotating along the shortest arc from v_from to v_to """
q = np.zeros(4)
dot = np.dot(v_from, v_to)
v11 = np.dot(v_from, v_from)
v22 = np.dot(v_to, v_to)
q[0] = np.sqrt(v11 * v22) + dot
q[1:4] = np.cross(v_from, v_to)
if np.linalg.norm(q) < 1e-6:
        # The norm of q is zero if v_from == -v_to; in that case we need to rotate 180 degrees
        # about some arbitrary vector orthogonal to both v_from and v_to.
orthogonal = any_orthogonal(v_from)
q[0] = 0.0 # this is cos(alpha/2) which means rotation 180 deg
q[1:4] = orthogonal
return quat_normalize(q / np.linalg.norm(q))
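def _example_vectors2quat():
    # Added usage sketch, not part of the original module: mirrors the unit
    # tests by checking that the returned quaternion rotates v_from onto v_to.
    v_from = np.array([1.0, 0.0, 0.0])
    v_to = np.array([0.0, 1.0, 0.0])
    quat = vectors2quat(v_from, v_to)
    assert np.allclose(quat2mat(quat) @ v_from, v_to)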
def rot_z_aligned(cube_quat, quat_threshold, include_flip=True):
"""
    Determines if the cube is within quat_threshold of a z-aligned orientation, which means
    that one of the rotatable faces of the **face** cube is on top. Equivalently, the Euler
    angles are either a pure rotation around the z-axis, or a 180 degree rotation around the
    x-axis plus some rotation around the z-axis.
"""
cube_angles = quat2euler(cube_quat)
target_angle = np.eye(3)[-1] * cube_angles
# if include_flip: True, allow z-axis rotation with top or bottom face as top
# else, allow z-axis rotation only with top face as top
if include_flip:
x_flip = np.asarray([np.pi, 0, 0])
aligned_angles = [target_angle, target_angle + x_flip]
else:
aligned_angles = [target_angle]
for aligned_angle in aligned_angles:
aligned_quat = euler2quat(aligned_angle)
quat_diff = quat_difference(cube_quat, aligned_quat)
quat_dist = quat_magnitude(quat_diff)
if quat_dist < quat_threshold:
return True
return False
def rot_xyz_aligned(cube_quat, quat_threshold):
"""
Determines if the cube is within quat_threshold of a xyz-aligned orientation, which means that
one of the rotatable faces of the **full** cube is on top.
This means that one of the axes of local coordinate system of the cube is pointing straight up.
"""
z_up = np.array([0, 0, 1]).reshape(3, 1)
mtx = quat2mat(cube_quat)
    # Axis that is closest (by dot product) to z-up
axis_nr = np.abs((z_up.T @ mtx)).argmax()
# Axis of the cube pointing the closest to the top
axis = mtx[:, axis_nr]
axis = axis * np.sign(axis @ z_up)
    # Quaternion representing the rotation from the almost-up "axis" to the
    # actual "up" direction
difference_quat = vectors2quat(axis, z_up[:, 0])
return quat_magnitude(difference_quat) < quat_threshold
def random_unity2(random_state):
"""
    Generates a random 3D unit vector (direction) with a uniform spherical distribution.
    Algorithm from http://stackoverflow.com/questions/5408276/python-uniform-spherical-distribution
"""
phi = random_state.uniform(0, np.pi * 2)
costheta = random_state.uniform(-1, 1)
theta = np.arccos(costheta)
x = np.sin(theta) * np.cos(phi)
y = np.sin(theta) * np.sin(phi)
z = np.cos(theta)
return np.array([x, y, z])
| 18,620 | 32.611913 | 105 | py |
robogym | robogym-master/robogym/utils/env_utils.py | import glob
import json
import os
from copy import deepcopy
from functools import partial
from runpy import run_path
import _jsonnet
import numpy as np
from gym.spaces import Box, Dict, Tuple
class InvalidSimulationError(Exception):
pass
def gym_space_from_arrays(arrays):
""" Define environment observation space using an example observation """
if isinstance(arrays, np.ndarray):
ret = Box(-np.inf, np.inf, arrays.shape, np.float32)
ret.flatten_dim = np.prod(ret.shape)
elif isinstance(arrays, (tuple, list)):
ret = Tuple([gym_space_from_arrays(arr) for arr in arrays])
elif isinstance(arrays, dict):
ret = Dict(dict([(k, gym_space_from_arrays(v)) for k, v in arrays.items()]))
else:
raise TypeError(f"Array is of unsupported type: {type(arrays)}")
return ret
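def _example_gym_space_from_arrays():
    # Added usage sketch, not part of the original module: builds a Dict
    # space from an example observation containing nested arrays.
    obs = {"qpos": np.zeros(7), "images": [np.zeros((4, 4)), np.zeros((4, 4))]}
    space = gym_space_from_arrays(obs)
    assert isinstance(space, Dict)
    assert space.spaces["qpos"].shape == (7,)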
def merge_dict_recursive(d1, d2):
ret = deepcopy(d1)
for k, v in d2.items():
if k not in d1 or not isinstance(v, dict):
ret[k] = v
else:
ret[k] = merge_dict_recursive(d1[k], v)
return ret
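def _example_merge_dict_recursive():
    # Added usage sketch, not part of the original module: values from the
    # second dict win, but nested dicts are merged rather than replaced.
    d1 = {"a": 1, "nested": {"x": 1, "y": 2}}
    d2 = {"nested": {"y": 3}}
    assert merge_dict_recursive(d1, d2) == {"a": 1, "nested": {"x": 1, "y": 3}}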
def get_function(obj):
if callable(obj):
# Should only be used in tests!
return obj
name = obj["function"]
extra_args = obj.get("args", {})
module_path, function_name = name.rsplit(":", 1)
result = getattr(__import__(module_path, fromlist=(function_name,)), function_name)
if len(extra_args) > 0:
def result_wrapper(*args, **kwargs):
actual_kwargs = merge_dict_recursive(extra_args, kwargs)
return result(*args, **actual_kwargs)
return result_wrapper
else:
return result
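def _example_get_function():
    # Added usage sketch, not part of the original module: resolves a
    # function from a "module.path:function_name" spec; the optional "args"
    # entry is merged into every call as default keyword arguments.
    spec = {"function": "json:dumps", "args": {"indent": 2}}
    dumps_indented = get_function(spec)
    assert dumps_indented({"a": 1}) == '{\n  "a": 1\n}'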
class MakeEnvFinder:
ENV_PATTERNS = [
os.path.abspath(
os.path.join(os.path.dirname(__file__), "../envs", "**", "*.py")
)
]
@classmethod
def find(cls, pattern, fun_name=None):
if pattern.endswith("py") and os.path.exists(pattern):
if fun_name is None:
fun_name = "make_env"
print(f"Loading environment from {pattern}::{fun_name}")
module = run_path(pattern)
make_env = module[fun_name]
return make_env
elif pattern.endswith((".jsonnet", ".libsonnet", ".json")) and os.path.exists(
pattern
):
def resolve_fun_name(data, fun_name):
for elem in fun_name.split("."):
data = data[elem]
return data
print(f"Loading environment from {pattern}::{fun_name}")
if pattern.endswith(".json"):
with open(pattern, "r") as f:
data = json.load(f)
else:
data = json.loads(_jsonnet.evaluate_file(pattern))
if fun_name is not None:
resolved_data = resolve_fun_name(data, fun_name)
else:
# Auto-detect a working fun_name
candidates = ["make_env", "machine_pools.evaluator.args.make_env"]
resolved_data = None
for candidate in candidates:
try:
resolved_data = resolve_fun_name(data, candidate)
break
except KeyError:
pass
assert (
resolved_data is not None
), "could not auto-detect a function name; please provide it (e.g. via `::machine_pools.bch-b1.args.make_env`)"
make_env = get_function(resolved_data)
return make_env
else:
matching = [
m for p in cls.ENV_PATTERNS for m in glob.glob(p, recursive=True)
]
matching = [match for match in matching if match.find(pattern) > -1]
matching = [
match
for match in matching
if not os.path.basename(match).startswith("test_")
]
assert len(matching) < 2, "Found multiple environments matching %s" % str(
matching
)
if len(matching) == 1:
matching = matching[0]
if matching.endswith(".py") and fun_name is not None:
matching += "::" + fun_name
return cls.find(matching)
else:
                assert False, "No environment found matching pattern %s" % pattern
def load_env(
pattern,
make_env_finder=MakeEnvFinder,
arg_filter=None,
return_args_remaining=False,
**kwargs,
):
args_remaining = {}
pattern = pattern.split("::")
fun_name = None
if len(pattern) == 1:
pattern = pattern[0]
else:
assert len(pattern) == 2
pattern, fun_name = pattern[0], pattern[1]
make_env = make_env_finder.find(pattern, fun_name=fun_name)
assert make_env is not None, f"No environment found matching {pattern}::{fun_name}"
if arg_filter is not None:
kwargs, args_remaining = arg_filter(make_env, kwargs)
make_env = partial(make_env, **kwargs)
env = make_env()
if return_args_remaining:
return env, args_remaining
else:
return env
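def _example_load_env():
    # Added usage sketch, not part of the original module. The path below is
    # only an assumption for illustration: any module exposing a make_env()
    # function can be loaded via the "<path>::<function>" pattern.
    return load_env("robogym/envs/rearrange/blocks.py::make_env")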
| 5,116 | 29.640719 | 127 | py |
robogym | robogym-master/robogym/utils/mesh.py | from typing import Tuple
import numpy as np
import trimesh
def get_vertices_bounding_box(vertices: np.ndarray) -> Tuple[np.ndarray, np.ndarray, float]:
min_xyz = np.min(vertices, axis=0)
max_xyz = np.max(vertices, axis=0)
size = (max_xyz - min_xyz) / 2.0
assert np.all(size >= 0.0)
pos = min_xyz + size
return pos, size, np.linalg.norm(size)
def subdivide_mesh(
vertices: np.ndarray, faces: np.ndarray, subdivide_threshold: float
) -> np.ndarray:
"""
Subdivide mesh into smaller triangles.
:param vertices: Vertices of the mesh.
:param faces: Faces of the mesh.
:param subdivide_threshold: The max length for edges after the subdivision is
defined as norm(bounding_box_size) * subdivide_threshold
:return: Vertices after subdivision
"""
max_edge = get_vertices_bounding_box(vertices)[-1] * subdivide_threshold
return trimesh.remesh.subdivide_to_size(vertices, faces, max_edge)[0]
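def _example_subdivide_mesh():
    # Added usage sketch, not part of the original module: subdividing a unit
    # box with threshold 0.5 caps edge lengths at half the bounding-box size
    # norm, so the vertex count must grow.
    box = trimesh.creation.box(extents=(1.0, 1.0, 1.0))
    vertices = subdivide_mesh(box.vertices, box.faces, subdivide_threshold=0.5)
    assert len(vertices) > len(box.vertices)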
| 947 | 27.727273 | 82 | py |
robogym | robogym-master/robogym/utils/tests/test_rotation.py | import itertools as it
import unittest
import numpy as np
from mujoco_py import functions
from numpy.random import randint, uniform
from numpy.testing import assert_allclose
from scipy.linalg import inv, sqrtm
from transforms3d import euler, quaternions
from robogym.utils.rotation import (
any_orthogonal,
euler2mat,
euler2quat,
mat2euler,
mat2quat,
quat2euler,
quat2mat,
quat_average,
quat_magnitude,
quat_normalize,
rot_xyz_aligned,
vectors2quat,
)
N = 10 # Number of trials to run
def normalize_mat(mat):
if np.abs(np.linalg.det(mat)) < 1e-10:
raise ValueError("Matrix too close to singular")
mat = np.real(mat.dot(inv(sqrtm(mat.T.dot(mat)))))
if np.linalg.det(mat) < 0:
mat *= -1
return mat
def normalize_quat(quat):
quat /= np.sqrt(np.sum(np.square(quat)))
if quat[0] < 0:
quat *= -1
return quat
def random_unit_length_vec():
v = np.random.randn(3) * 5
while np.linalg.norm(v) < 1e-4:
v = np.random.randn(3)
return v / np.linalg.norm(v)
class RotationTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.random.seed(112358)
def test_euler2mat(self):
s = (N, N, 3)
eulers = uniform(-4, 4, size=s) * randint(2, size=s)
mats = euler2mat(eulers)
self.assertEqual(mats.shape, (N, N, 3, 3))
for i in range(N):
for j in range(N):
res = euler.euler2mat(*eulers[i, j], axes="rxyz")
np.testing.assert_almost_equal(mats[i, j], res)
def test_euler2quat(self):
s = (N, N, 3)
eulers = uniform(-4, 4, size=s) * randint(2, size=s)
quats = euler2quat(eulers)
self.assertEqual(quats.shape, (N, N, 4))
for i in range(N):
for j in range(N):
res = euler.euler2quat(*eulers[i, j], axes="rxyz")
np.testing.assert_almost_equal(quats[i, j], res)
def test_mat2euler(self):
s = (N, N, 3, 3)
mats = uniform(-4, 4, size=s) * randint(2, size=s)
eulers = mat2euler(mats)
self.assertEqual(eulers.shape, (N, N, 3))
for i in range(N):
for j in range(N):
res = euler.mat2euler(mats[i, j], axes="rxyz")
np.testing.assert_almost_equal(eulers[i, j], res)
def test_mat2quat(self):
s = (N, N, 3, 3)
mats = uniform(-4, 4, size=s) * randint(2, size=s)
quats = mat2quat(mats)
self.assertEqual(quats.shape, (N, N, 4))
for i in range(N):
for j in range(N):
# Compare to transforms3d
res = quaternions.mat2quat(mats[i, j])
np.testing.assert_almost_equal(quats[i, j], res)
# Compare to MuJoCo
try:
mat = normalize_mat(mats[i, j])
except (np.linalg.linalg.LinAlgError, ValueError):
continue # Singular matrix, NaNs
res[:] = 0
functions.mju_mat2Quat(res, mat.flatten())
res = normalize_quat(res)
quat = mat2quat(mat)
# quat is the same rotation as -quat
assert np.allclose(quat, res) or np.allclose(
-quat, res
), "quat {} res {}".format(quat, res)
def test_quat2euler(self):
s = (N, N, 4)
quats = uniform(-1, 1, size=s) * randint(2, size=s)
eulers = quat2euler(quats)
self.assertEqual(eulers.shape, (N, N, 3))
for i in range(N):
for j in range(N):
res = euler.quat2euler(quats[i, j], axes="rxyz")
np.testing.assert_almost_equal(eulers[i, j], res)
def test_quat2mat(self):
s = (N, N, 4)
quats = uniform(-1, 1, size=s) * randint(2, size=s)
mats = quat2mat(quats)
self.assertEqual(mats.shape, (N, N, 3, 3))
for i in range(N):
for j in range(N):
# Compare to transforms3d
res = quaternions.quat2mat(quats[i, j])
np.testing.assert_almost_equal(mats[i, j], res)
# Compare to MuJoCo
quat = normalize_quat(quats[i, j])
mat = np.zeros(9, dtype=np.float64)
functions.mju_quat2Mat(mat, quat)
if np.isnan(mat).any():
continue # MuJoCo returned NaNs
np.testing.assert_almost_equal(quat2mat(quat), mat.reshape((3, 3)))
def test_mat2quat2euler2mat(self):
s = (N, N, 3, 3)
mats = uniform(-np.pi, np.pi, size=s) * randint(2, size=s)
for i in range(N):
for j in range(N):
try:
mat = normalize_mat(mats[i, j])
except: # noqa
continue # Singular Matrix or NaNs
result = euler2mat(quat2euler(mat2quat(mat)))
np.testing.assert_allclose(mat, result, atol=1e-8, rtol=1e-6)
def test_mat2euler2quat2mat(self):
s = (N, N, 3, 3)
mats = uniform(-np.pi, np.pi, size=s) * randint(2, size=s)
for i in range(N):
for j in range(N):
try:
mat = normalize_mat(mats[i, j])
except: # noqa
continue # Singular Matrix or NaNs
result = quat2mat(euler2quat(mat2euler(mat)))
np.testing.assert_allclose(mat, result, atol=1e-8, rtol=1e-6)
def test_quat_average(self):
max_angle = 1.0
euler1 = np.zeros(3)
euler2 = np.array([max_angle, 0.0, 0.0])
q1 = euler2quat(euler1)
q2 = euler2quat(euler2)
assert_allclose(q1, quat_average([q1]))
mid_q = quat_average([q1, q2])
assert_allclose(quat2euler(mid_q), [max_angle / 2.0, 0.0, 0.0])
for weight in [0.0, 0.5, 1.0]:
avg_q = quat_average([q1, q2], weights=[1 - weight, weight])
assert_allclose(quat2euler(avg_q), [max_angle * weight, 0.0, 0.0])
def test_quat_normalize(self):
""" Test quaternion normalization """
q1 = np.array([1.0, 0.0, 0.0, 0.0])
q2 = np.array([-1.0, 0.0, 0.0, 0.0])
q3 = np.array([0.0, 1.0, 0.0, 0.0])
assert np.linalg.norm(quat_normalize(q1) - q1) < 1e-8
assert np.linalg.norm(quat_normalize(q2) + q2) < 1e-8
assert np.linalg.norm(quat_normalize(q3) - q3) < 1e-8
for q in [q1, q2, q3]:
assert quat_normalize(q)[0] >= 0.0
def test_any_orthogonal(self):
""" Test finding any orthogonal vector to given """
vectors = [
np.array([1, 0, 0]),
np.array([0, 1, 0]),
np.array([0, 0, 1]),
random_unit_length_vec(),
random_unit_length_vec(),
random_unit_length_vec(),
]
for v in vectors:
orthogonal = any_orthogonal(v)
# Vectors are indeed orthogonal
assert np.abs(np.dot(v, orthogonal)) < 1e-8
# orthogonal vector has unit length
assert np.abs(np.linalg.norm(orthogonal) - 1) < 1e-8
def test_vectors2quat(self):
""" Test constructing quaternion from two given vectors """
vectors = [
np.array([1, 0, 0]),
np.array([0, 1, 0]),
np.array([0, 0, 1]),
-np.array([1, 0, 0]),
-np.array([0, 1, 0]),
-np.array([0, 0, 1]),
random_unit_length_vec(),
random_unit_length_vec(),
random_unit_length_vec(),
]
for v1, v2 in it.product(vectors, vectors):
quat = vectors2quat(v1, v2)
mat = quat2mat(quat)
maybe_v2 = mat @ v1
# Test that quat is normalized
assert np.abs(np.linalg.norm(quat) - 1.0) < 1e-8
# Make sure that given quaterion is the shortest path rotation
# np.clip is necessary due to potential minor numerical instabilities
assert quat_magnitude(quat) <= np.arccos(np.clip(v1 @ v2, -1.0, 1.0)) + 1e-8
# Test that quaternion rotates input vector to output vector
assert np.linalg.norm(maybe_v2 - v2) < 1e-8
def test_rot_xyz_aligned():
""" Test function 'rot_xyz_aligned' """
# Identity configuration
initial_configuration = np.array([1.0, 0.0, 0.0, 0.0])
# Cube is aligned in initial condition
assert rot_xyz_aligned(initial_configuration, 0.01)
# Rotate along each axis more than the threshold
transformations = np.eye(3) * 0.5
for i in range(3):
quat = euler2quat(transformations[i])
if i in [0, 1]:
# For rotations along x,y cube is not aligned
assert not rot_xyz_aligned(quat, 0.4)
else:
# Cube is aligned for rotation along z axis
assert rot_xyz_aligned(quat, 0.4)
# Rotate along each axis so much that the threshold is met again
transformations = np.eye(3) * (np.pi / 2 - 0.3)
for i in range(3):
quat = euler2quat(transformations[i])
# Cube is aligned again
assert rot_xyz_aligned(quat, 0.4)
| 9,198 | 32.089928 | 88 | py |
robogym | robogym-master/robogym/utils/tests/test_rubik_utils.py | import unittest
import pycuber
from robogym.utils.rubik_utils import solve_fast
class RubikTest(unittest.TestCase):
def test_solver(self):
cube = pycuber.Cube()
initial_cube = str(cube)
alg = pycuber.Formula()
random_alg = alg.random()
cube(random_alg)
        assert initial_cube != str(cube), "Randomization hasn't worked."
solution = solve_fast(cube)
print(solution)
for step in solution.split(" "):
cube.perform_step(step)
assert initial_cube == str(cube), "Fast solution doesn't work"
| 583 | 26.809524 | 73 | py |
robogym | robogym-master/robogym/randomization/sim.py | import abc
import copy
from typing import List, Union
import numpy as np
from mujoco_py import MjSim
from numpy.random import RandomState
from robogym.mujoco.constants import OPT_FIELDS, PID_GAIN_PARAMS
from robogym.randomization.common import Randomizer
from robogym.randomization.parameters import (
FloatRandomizerParameter,
RandomizerParameter,
)
from robogym.utils.rotation import random_unity2
def has_prefixes(string, prefixes):
if isinstance(prefixes, list):
for p in prefixes:
if has_prefixes(string, p):
return True
return False
else:
return string.startswith(prefixes)
class SimulationRandomizer(Randomizer[MjSim], abc.ABC):
"""
    Randomizer which randomizes the simulation.
"""
def __init__(self, name):
super().__init__(name, enabled=True)
self.sim = None
self._initial_value = None
self._params = self._prepare_randomizer_params()
def initialize(self, sim: MjSim):
"""
Initialize state for the randomizer. This method will be called
every time a different sim instance is passed in.
"""
self.sim = sim
self._initialize()
def _initialize(self):
"""
Add additional initialization logic.
"""
pass
def _randomize(self, target: MjSim, random_state: RandomState):
if target != self.sim:
self.initialize(target)
self._randomize_sim(random_state)
return self.sim
@abc.abstractmethod
def _randomize_sim(self, random_state: RandomState):
"""
Implement this method to apply randomization to self.sim
"""
pass
def _register_sim_parameter(
self,
name="value",
initial_value=0.0,
value_min=-4.0,
value_max=4.0,
delta=None,
):
if delta is None:
delta = self._default_delta(value_min, value_max)
return self.register_parameter(
FloatRandomizerParameter(
name,
initial_value=initial_value,
value_range=(value_min, value_max),
delta=delta,
)
)
@staticmethod
def _default_delta(value_min, value_max):
"""
If no delta is provided for given parameter, this method will be called to get
default delta.
"""
return None
@abc.abstractmethod
def _prepare_randomizer_params(
self,
) -> Union[RandomizerParameter, List[RandomizerParameter]]:
"""
Return all randomizer parameters associated with this randomizer.
"""
pass
@property
def _randomizer_param_values(self) -> Union[float, np.ndarray]:
"""
Get numerical values for all randomizer parameters.
"""
if isinstance(self._params, RandomizerParameter):
return self._params.get_value()
else:
return np.array([param.get_value() for param in self._params])
class GravityRandomizer(SimulationRandomizer):
def __init__(self):
super().__init__("gravity")
def _initialize(self):
self._initial_value = self.sim.model.opt.gravity.copy()
def _prepare_randomizer_params(self):
return self._register_sim_parameter(value_min=0.0)
def _randomize_sim(self, random_state: RandomState):
direction = random_unity2(random_state)
mag = np.exp(self._randomizer_param_values) - 1.0
noise = direction * 1.0 * mag
self.sim.model.opt.gravity[:] = self._initial_value + noise
@staticmethod
def _default_delta(value_min, value_max):
return (value_max - value_min) / 10
class PidRandomizer(SimulationRandomizer):
def __init__(self, field_name):
super().__init__(field_name)
self._idx = PID_GAIN_PARAMS.index(field_name)
def _initialize(self):
self._initial_value = copy.deepcopy(
self.sim.model.actuator_gainprm[:, self._idx]
)
def _prepare_randomizer_params(self):
return [
self._register_sim_parameter("mean"),
self._register_sim_parameter("std", value_min=0.0),
]
def _randomize_sim(self, random_state: RandomState):
values = self._randomizer_param_values
assert isinstance(values, np.ndarray)
self.sim.model.actuator_gainprm[:, self._idx] = self._initial_value * np.exp(
random_state.normal(
values[0], scale=abs(values[1]), size=self._initial_value.shape
)
)
class JointMarginRandomizer(SimulationRandomizer):
def __init__(self):
super().__init__("jnt_margin")
def _initialize(self):
self._initial_value = copy.deepcopy(self.sim.model.jnt_margin)
def _prepare_randomizer_params(self):
return self._register_sim_parameter(value_min=0.0)
def _randomize_sim(self, random_state: RandomState):
new_values = self._initial_value + (
random_state.uniform(size=self._initial_value.shape)
* (np.exp(self._randomizer_param_values) - 1.0)
* 0.15
)
self.sim.model.jnt_margin[:] = new_values
class GeomSolimpRandomizer(SimulationRandomizer):
PARAMETER_NAMES = [
"dmax_mean",
"dmax_std",
"delta_mean",
"delta_std",
"width_mean",
"width_std",
]
def __init__(self, drange=(0.5, 0.99)):
assert len(drange) == 2
super().__init__("geom_solimp")
self._drange = drange
self.parameters_shape = [6]
self.parameter_names = self.PARAMETER_NAMES
self.positive = [False, True, False, True, False, True]
def _initialize(self):
# Only take first three parameters
self._initial_value = copy.deepcopy(self.sim.model.geom_solimp[:, :3])
assert self._initial_value.shape[1] == 3
def _prepare_randomizer_params(self):
params = []
for i in range(0, len(self.PARAMETER_NAMES), 2):
params.extend(
[
self._register_sim_parameter(name=self.PARAMETER_NAMES[i]),
self._register_sim_parameter(
name=self.PARAMETER_NAMES[i + 1], value_min=0.0
),
]
)
return params
def _randomize_sim(self, random_state: RandomState):
values = self._randomizer_param_values
assert isinstance(values, np.ndarray)
dmax_mean, dmax_std, delta_mean, delta_std, width_mean, width_std = values
assert dmax_std >= 0.0
assert delta_std >= 0.0
assert width_std >= 0.0
# We randomize (1-dmax) since dmax typically very close to 1 and we'd like to cover the
# range [0, 1] well. We then sample delta that is subtracted from dmax to produce dmin,
# thus ensuring that dmin <= dmax holds.
dmax = 1.0 - (1.0 - self._initial_value[:, 1]) * np.exp(
random_state.normal(
dmax_mean, scale=dmax_std, size=self._initial_value.shape[0]
)
)
dmax = np.clip(dmax, *self._drange)
delta = (self._initial_value[:, 1] - self._initial_value[:, 0]) * np.exp(
random_state.normal(
delta_mean, scale=delta_std, size=self._initial_value.shape[0]
)
)
dmin = np.clip(dmax - delta, *self._drange)
# Sample width.
width = self._initial_value[:, 2] * np.exp(
random_state.normal(
width_mean, scale=width_std, size=self._initial_value.shape[0]
)
)
# Validate constraints. Mujoco internally already ensures that dmin and dmax are clipped,
# if necessary (http://mujoco.org/book/modeling.html#CSolver), but we enforce slightly
# stronger constraints for additional stability.
assert dmin.shape == dmax.shape == width.shape
assert (dmin <= dmax).all()
assert (self._drange[0] <= dmin).all()
assert (dmin <= self._drange[1]).all()
assert (self._drange[0] <= dmax).all()
assert (dmax <= self._drange[1]).all()
self.sim.model.geom_solimp[:, 0] = dmin
self.sim.model.geom_solimp[:, 1] = dmax
self.sim.model.geom_solimp[:, 2] = width
class GeomSolrefRandomizer(SimulationRandomizer):
PARAMETER_NAMES = [
"timeconst_mean",
"timeconst_std",
"dampratio_mean",
"dampratio_std",
]
def __init__(self):
super().__init__("geom_solref")
def _initialize(self):
self._initial_value = copy.deepcopy(self.sim.model.geom_solref)
def _prepare_randomizer_params(self):
params = []
for i in range(0, len(self.PARAMETER_NAMES), 2):
params.extend(
[
self._register_sim_parameter(name=self.PARAMETER_NAMES[i]),
self._register_sim_parameter(
name=self.PARAMETER_NAMES[i + 1], value_min=0.0
),
]
)
return params
def _randomize_sim(self, random_state: RandomState):
values = self._randomizer_param_values
assert isinstance(values, np.ndarray)
timeconst_mean, timeconst_std, dampratio_mean, dampratio_std = values
assert timeconst_std >= 0.0
assert dampratio_std >= 0.0
self.sim.model.geom_solref[:, 0] = self._initial_value[:, 0] * np.exp(
random_state.normal(
timeconst_mean, scale=timeconst_std, size=self._initial_value.shape[0]
)
)
self.sim.model.geom_solref[:, 1] = self._initial_value[:, 1] * np.exp(
random_state.normal(
dampratio_mean, scale=dampratio_std, size=self._initial_value.shape[0]
)
)
class GenericSimRandomizer(SimulationRandomizer):
def __init__(
self,
name,
field_name,
apply_mode="uncoupled_mean_variance",
coef=1.0,
geom_prefix=None,
body_prefix=None,
dof_jnt_prefix=None,
jnt_prefix=None,
positive_only=False,
zero_threshold=0.0,
):
"""
Generic randomizer for mujoco fields.
:param field_name: name of the field to randomize (there must be a field in `sim.model`
or `sim.model.opt` with the given name)
        :param apply_mode: specifies how to apply environment parameters to the environment:
            'sample' samples environments based on the distribution defined by the environment
            parameters, and 'set' applies environment parameters directly to the environment.
        :param coef: a scalar by which environment parameters are multiplied before being applied
        :param geom_prefix: If not None then this randomizer will only affect the geoms
            that have this prefix.
        :param body_prefix: If not None then this randomizer will only affect the bodies
            that have this prefix.
:param dof_jnt_prefix: If not None then this randomizer will only affect the
DOFs that are associated with a joint with this prefix.
:param jnt_prefix: If not None then this randomizer will only affect the
joints that have this prefix.
:param positive_only: If True, then the given mujoco field will only be
set to positive values.
:param zero_threshold: Maximum fraction of original values that are allowed to be zero,
only applicable to multiplicative modes.
"""
self._apply_mode = apply_mode
super().__init__(name)
self._field_name = field_name
self._is_opt = field_name in OPT_FIELDS
self._coef = coef
self._positive_only = positive_only
self._geom_prefix = geom_prefix
self._body_prefix = body_prefix
self._dof_jnt_prefix = dof_jnt_prefix
self._jnt_prefix = jnt_prefix
self._zero_threshold = zero_threshold
self._ids = None
def _initialize(self):
self._ids = self.identify_fields(
self._geom_prefix,
self._body_prefix,
self._dof_jnt_prefix,
self._jnt_prefix,
)
self._initial_value = copy.deepcopy(self.get_params())
self.multiplicative_mode_sanity_check(self._zero_threshold)
def _prepare_randomizer_params(
self,
) -> Union[RandomizerParameter, List[RandomizerParameter]]:
if self._apply_mode in (
"coupled",
"uncoupled",
"coupled_mean_variance",
"max_additive",
):
params = self._register_sim_parameter()
elif self._apply_mode in (
"coupled_additive",
"coupled_symmetric_ranges",
"variance",
"variance_additive",
):
params = self._register_sim_parameter(value_min=0.0)
elif self._apply_mode in ("ranges", "coupled_ranges", "semicorrelated"):
params = [
self._register_sim_parameter(name="low"),
self._register_sim_parameter(name="high"),
]
elif self._apply_mode == "variance_mean_additive":
params = [
self._register_sim_parameter(name="mean", value_min=0.0),
self._register_sim_parameter(name="std", value_min=0.0),
]
elif self._apply_mode == "uncoupled_mean_variance":
params = [
self._register_sim_parameter(name="mean"),
self._register_sim_parameter(name="std", value_min=0.0),
]
else:
raise ValueError("Invalid mode: {}".format(self._apply_mode))
return params
@staticmethod
def _default_delta(value_min, value_max):
return (value_max - value_min) / 10
def multiplicative_mode_sanity_check(self, zero_threshold):
"""
Ensure that multiplicative apply modes are not applied to parameters whose initial
values are mostly zeros.
"""
multiplicative_apply_modes = {
"coupled",
"uncoupled",
"ranges",
"coupled_ranges",
"semicorrelated",
"coupled_symmetric_ranges",
"variance",
"coupled_mean_variance",
"uncoupled_mean_variance",
}
if self._apply_mode in multiplicative_apply_modes:
params = self._initial_value
zeros = np.isclose(params, 0.0).mean()
assert zeros <= zero_threshold, (
f"Mode is multiplicative on field {self._field_name}, but too many "
f"values are zero, maximum fraction allowed is {zero_threshold:.3f} but got "
f"{zeros:.3f}: {self._initial_value}. If you think that is expected, please "
f"adjust the zero_threshold value or add an exception above."
)
def identify_fields(self, geom_prefix, body_prefix, dof_jnt_prefix, jnt_prefix):
if geom_prefix is not None:
assert self._field_name.startswith("geom_")
geom_names = [
name
for name in self.sim.model.geom_names
if has_prefixes(name, geom_prefix)
]
ids = np.array(
sorted([self.sim.model.geom_name2id(name) for name in geom_names])
)
elif body_prefix is not None:
assert self._field_name.startswith("body_")
body_names = [
name
for name in self.sim.model.body_names
if has_prefixes(name, body_prefix)
]
ids = np.array(
sorted([self.sim.model.body_name2id(name) for name in body_names])
)
elif dof_jnt_prefix is not None:
def has_prefix(jnt_id):
return has_prefixes(
self.sim.model.joint_id2name(jnt_id), dof_jnt_prefix
)
assert self._field_name.startswith("dof_")
ids = [
idx
for idx, jnt_id in enumerate(self.sim.model.dof_jntid)
if has_prefix(jnt_id)
]
elif jnt_prefix is not None:
def has_prefix(jnt_id):
return has_prefixes(self.sim.model.joint_id2name(jnt_id), jnt_prefix)
assert self._field_name.startswith("jnt_")
ids = [
idx for idx in range(len(self.sim.model.jnt_type)) if has_prefix(idx)
]
else:
ids = None
if ids is not None:
ids = np.array(sorted(ids))
assert len(ids) > 0, "no IDs matched for {}".format(self._field_name)
else:
ids = None
return ids
def __repr__(self):
return "<{} : {}>".format(self.__class__.__name__, self._field_name)
def _get_params(self):
if self._is_opt:
return getattr(self.sim.model.opt, self._field_name)
return getattr(self.sim.model, self._field_name)
def get_params(self):
out = self._get_params()
if self._ids is not None:
return out[self._ids]
return out
def set_params(self, new_values):
v = self._get_params()
if self._ids is not None:
v[self._ids] = new_values
else:
v[:] = new_values
def _randomize_sim(self, random_state: RandomState):
param_value = self._randomizer_param_values * self._coef
if self._apply_mode == "coupled":
new_value = self._initial_value * np.exp(param_value)
elif self._apply_mode == "coupled_additive":
new_value = self._initial_value + (np.exp(param_value) - 1.0)
elif self._apply_mode == "uncoupled":
new_value = self._initial_value * np.exp(
random_state.normal(param_value, size=self._initial_value.shape)
* np.absolute(param_value)
)
elif self._apply_mode == "ranges":
low = min(0, -param_value[0])
high = max(0, param_value[1])
new_value = self._initial_value * np.exp(
random_state.uniform(low, high, size=self._initial_value.shape)
)
elif self._apply_mode == "coupled_ranges":
low = min(0, -param_value[0])
high = max(0, param_value[1])
new_value = self._initial_value * np.exp(random_state.uniform(low, high))
elif self._apply_mode == "coupled_symmetric_ranges":
low = -abs(param_value)
            high = abs(param_value)  # intentionally abs(param_value), mirroring low
new_value = self._initial_value * np.exp(
random_state.uniform(low, high, size=self._initial_value.shape)
)
elif self._apply_mode == "variance":
variance = abs(param_value)
new_value = self._initial_value * np.exp(
random_state.normal(0, size=self._initial_value.shape) * variance
)
elif self._apply_mode == "variance_additive":
scale = np.exp(abs(param_value)) - 1.0
noise = random_state.normal(0, scale=scale, size=self._initial_value.shape)
new_value = self._initial_value + noise
elif self._apply_mode == "variance_mean_additive":
pos = np.exp(param_value[0]) - 1.0
scale = np.exp(abs(param_value[1])) - 1.0
noise = np.abs(
random_state.normal(pos, scale=scale, size=self._initial_value.shape)
)
new_value = self._initial_value + noise
elif self._apply_mode == "coupled_mean_variance":
new_value = self._initial_value * np.exp(
random_state.normal(
param_value, scale=abs(param_value), size=self._initial_value.shape
)
)
elif self._apply_mode == "uncoupled_mean_variance":
new_value = self._initial_value * np.exp(
random_state.normal(
param_value[0],
scale=abs(param_value[1]),
size=self._initial_value.shape,
)
)
elif self._apply_mode == "max_additive":
high = np.exp(abs(param_value)) - 1.0
noise = random_state.uniform(
low=0, high=high, size=self._initial_value.shape
)
new_value = self._initial_value + noise
else:
            raise RuntimeError("Invalid apply mode: {}".format(self._apply_mode))
if self._positive_only:
new_value = np.clip(new_value, 0, np.inf)
self.set_params(new_value)
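def _example_generic_sim_randomizer():
    # Added usage sketch, not part of the original module: mirrors the test
    # suite configuration for randomizing robot joint friction loss with
    # independent mean/variance parameters.
    return GenericSimRandomizer(
        name="dof_frictionloss_robot",
        field_name="dof_frictionloss",
        dof_jnt_prefix="robot0:",
        apply_mode="uncoupled_mean_variance",
    )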
| 20,619 | 33.949153 | 97 | py |
robogym | robogym-master/robogym/randomization/action.py | import abc
import numpy as np
from robogym.randomization.common import Randomizer
class ActionRandomizer(Randomizer[np.ndarray], abc.ABC):
"""
    Randomizer which randomizes actions.
"""
pass
| 208 | 13.928571 | 56 | py |
robogym | robogym-master/robogym/randomization/observation.py | import abc
from typing import Dict
import numpy as np
from robogym.randomization.common import Randomizer
class ObservationRandomizer(Randomizer[Dict[str, np.ndarray]], abc.ABC):
"""
    Randomizer which randomizes observations.
"""
pass
| 255 | 16.066667 | 72 | py |
robogym | robogym-master/robogym/randomization/common.py | import abc
from collections import OrderedDict
from enum import Enum
from typing import Dict, Generic, List, Optional, Tuple, TypeVar
import numpy as np
VType = TypeVar("VType", int, float)
class DType(Enum):
    INT = 1
FLOAT = 2
class RandomizerParameter(Generic[VType], abc.ABC):
"""
Base interface for randomizer parameter.
"""
INT = DType.INT
FLOAT = DType.FLOAT
def __init__(
self,
name: str,
initial_value: VType,
value_range: Tuple[VType, VType],
delta: Optional[VType] = None,
):
self.name = name
self._value_range: Tuple[VType, VType] = self._convert_range(value_range)
self._value: VType = self._convert_value(initial_value)
self._delta: Optional[VType] = self._convert_delta(delta)
################################################
# External APIs to interact with domain randomization.
def get_value(self) -> VType:
return self._value
def set_value(self, value: VType):
self._value = self._convert_value(value)
def get_range(self) -> Tuple[VType, VType]:
return self._value_range
def get_delta(self) -> Optional[VType]:
return self._delta
@property
@abc.abstractmethod
def dtype(self):
pass
################################################
# Internal methods.
def _convert_value(self, value: VType) -> VType:
low, high = self.get_range()
value = self._convert_type(value)
assert (
low <= value <= high
), ( # type: ignore
f"Value {value} is not within range of [{low}, {high}]"
)
return value
def _convert_range(self, value_range: Tuple[VType, VType]) -> Tuple[VType, VType]:
assert (
len(value_range) == 2
), f"Invalid range {value_range}, must tuple of two values."
low, high = value_range
return self._convert_type(low), self._convert_type(high)
def _convert_delta(self, delta: Optional[VType]):
if delta is not None:
return self._convert_type(delta)
else:
return None
@classmethod
@abc.abstractmethod
def _convert_type(cls, val: VType) -> VType:
pass
def __repr__(self):
return (
f"{self.__class__}(\n"
f"value={self.get_value()}\n"
f"range={self.get_range()}\n"
f")"
)
TType = TypeVar("TType")
class Randomizer(abc.ABC, Generic[TType]):
"""
Base interface for a randomizer.
"""
def __init__(self, name: str, enabled: bool = True):
self.name = name
self._parameters: Dict[str, RandomizerParameter] = OrderedDict()
self._enabled = enabled
def randomize(self, target: TType, random_state: np.random.RandomState) -> TType:
if self._enabled:
return self._randomize(target, random_state)
else:
return target
@abc.abstractmethod
def _randomize(self, target: TType, random_state: np.random.RandomState) -> TType:
pass
def get_parameters(self) -> List[RandomizerParameter]:
"""
Return all parameters for this randomizer.
"""
return list(self._parameters.values())
def get_parameter(self, name: str) -> RandomizerParameter:
"""
Get parameter by name.
"""
assert (
name in self._parameters
), f"Parameter {name} does not exist in randomizer {self.name}."
return self._parameters[name]
def register_parameter(self, parameter: RandomizerParameter):
"""
Register a parameter for this randomizer.
"""
assert (
parameter.name not in self._parameters
), f"Parameter with name {parameter.name} already exists."
self._parameters[parameter.name] = parameter
return parameter
def enable(self):
"""
Enable the randomizer.
"""
self._enabled = True
    def disable(self):
        """
        Disable the randomizer.
        """
        self._enabled = False
@property
def enabled(self):
return self._enabled
def reset(self):
"""
Reset state of the randomizer. Called during environment reset.
"""
pass
RType = TypeVar("RType", bound=Randomizer)
class RandomizerCollection(Generic[RType]):
"""
    Interface for a collection of randomizers; it provides functionality
    to register child randomizers and retrieve their parameters.
"""
def __init__(self):
self._randomizers = OrderedDict()
def register_randomizer(self, randomizer: RType) -> RType:
"""
Add a randomizer to the collection.
"""
assert (
randomizer.name not in self._randomizers
), f"Randomizer with name {randomizer.name} already exists."
self._randomizers[randomizer.name] = randomizer
return randomizer
def get_randomizers(self) -> List[RType]:
"""
Get all randomizers.
"""
return list(self._randomizers.values())
def get_randomizer(self, name) -> RType:
"""
Get randomizer by name.
"""
assert name in self._randomizers, f"Randomizer {name} does not exist"
return self._randomizers[name]
def _get_randomizer_parameters(self) -> List[RandomizerParameter]:
parameters = []
for randomizer in self.get_randomizers():
parameters.extend(randomizer.get_parameters())
return parameters
class ChainedRandomizer(
Randomizer[TType], RandomizerCollection[RType], Generic[TType, RType],
):
"""
    Base class for a randomizer which is a composition of multiple randomizers.
    During randomize, it applies each randomizer in registration order to the given
    target; for example ChainedRandomizer('cr', [r1, r2, r3]).randomize(target)
    is equivalent to r3.randomize(r2.randomize(r1.randomize(target))).
"""
def __init__(self, name, randomizers: List[RType]):
Randomizer.__init__(self, name, enabled=True)
RandomizerCollection.__init__(self) # type: ignore
for randomizer in randomizers:
self.register_randomizer(randomizer)
def _randomize(self, target: TType, random_state: np.random.RandomState) -> TType:
for randomizer in self.get_randomizers():
target = randomizer.randomize(target, random_state)
return target
def get_parameters(self):
return self._get_randomizer_parameters()
def reset(self):
for randomizer in self.get_randomizers():
randomizer.reset()
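def _example_chained_order():
    # Added usage sketch, not part of the original module: demonstrates that
    # chained randomizers are applied in registration order (r1 first).
    class _Append(Randomizer):
        def _randomize(self, target, random_state):
            return target + [self.name]
    chained = ChainedRandomizer("demo", [_Append("r1"), _Append("r2"), _Append("r3")])
    assert chained.randomize([], np.random.RandomState(0)) == ["r1", "r2", "r3"]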
| 6,630 | 26.17623 | 86 | py |
robogym | robogym-master/robogym/randomization/parameters.py | from typing import Optional, Tuple
import numpy as np
from robogym.randomization.common import RandomizerParameter
MAX_INT = int(1e9)  # A bound comfortably large for any integer parameter in practice.
class IntRandomizerParameter(RandomizerParameter[int]):
"""
Randomizer parameter of scalar int data type.
"""
def __init__(
self,
name: str,
initial_value: int,
value_range: Tuple[int, int] = (-MAX_INT, MAX_INT),
delta: Optional[int] = None,
):
super().__init__(name, initial_value, value_range, delta=delta)
@property
def dtype(self):
return RandomizerParameter.INT
@classmethod
def _convert_type(cls, val: int):
return int(val)
class FloatRandomizerParameter(RandomizerParameter[float]):
"""
Randomizer parameter of scalar float data type.
"""
def __init__(
self,
name: str,
initial_value: float,
value_range: Tuple[float, float] = (-np.inf, np.inf),
delta: Optional[float] = None,
):
super().__init__(name, initial_value, value_range, delta=delta)
@property
def dtype(self):
return RandomizerParameter.FLOAT
@classmethod
def _convert_type(cls, val: float):
return np.float32(val)
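def _example_float_parameter():
    # Added usage sketch, not part of the original module: mirrors the test
    # helpers -- a float parameter clamped to [-1, 1] with a unit ADR delta.
    param = FloatRandomizerParameter("foo", 0.0, value_range=(-1.0, 1.0), delta=1.0)
    param.set_value(0.5)
    assert param.get_value() == 0.5
    assert param.get_range() == (-1.0, 1.0)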
| 1,292 | 22.944444 | 80 | py |
robogym | robogym-master/robogym/randomization/env.py | from typing import (
Any,
Dict,
Generic,
Iterable,
List,
NamedTuple,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
import attr
import numpy as np
from robogym.randomization.action import ActionRandomizer
from robogym.randomization.common import (
ChainedRandomizer,
Randomizer,
RandomizerCollection,
RandomizerParameter,
VType,
)
from robogym.randomization.observation import ObservationRandomizer
from robogym.randomization.parameters import (
FloatRandomizerParameter,
IntRandomizerParameter,
)
from robogym.randomization.sim import SimulationRandomizer
MYPY = False
if MYPY:
from robogym.robot_env import RobotEnvParameters
bound = RobotEnvParameters
else:
bound = None
PType = TypeVar("PType", bound=bound)
def build_randomizable_param(
default: Optional[VType] = None,
low: Optional[VType] = None,
high: Optional[VType] = None,
) -> VType:
"""
Create parameter attribute which will be automatically registered as randomization
parameters. If you create an attribute using this function you can directly
randomize it using ADR config.
:param default: Default value for this parameter.
:param low: Low end of range of this parameter.
:param high: High end of range for this parameter.
:return: The parameter attribute.
"""
if low is None:
low = -np.inf
if high is None:
high = np.inf
def value_in_range(_, attribute, value):
assert (
low - 1e-6 <= value <= high + 1e-6
), f"Wrong value for {attribute.name}: {value}, must be in [{low}, {high}]"
kwargs: Dict[str, Any] = {"validator": value_in_range}
if default is not None:
kwargs["default"] = default
return attr.ib(
metadata={"randomizable": True, "low": low, "high": high}, **kwargs,
)
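# Added usage sketch, not part of the original module: fields declared with
# build_randomizable_param are discovered automatically by
# enumerate_randomizable_params / EnvParameterRandomizer below.
@attr.s(auto_attribs=True)
class _ExampleRandomizableParameters:
    num_objects: int = build_randomizable_param(1, low=1, high=8)
    friction: float = build_randomizable_param(0.5, low=0.0, high=1.0)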
class _RandomizableParam(NamedTuple, Generic[VType]):
name: str
value_type: Type[VType]
default: VType
value_range: Tuple[VType, VType]
parent_instance: Any
def enumerate_randomizable_params(parameters: PType) -> Iterable[_RandomizableParam]:
"""
    Recursively enumerate all randomizable params under the given parameters instance.
    Returns an iterable of _RandomizableParam, one per randomizable parameter.
:param parameters: The parameters instance.
"""
parameters_type = type(parameters)
for field in attr.fields(parameters_type):
metadata = field.metadata
name = field.name
if metadata.get("randomizable", False):
assert field.type
assert field.default is not None
yield _RandomizableParam(
name=name,
value_type=field.type,
default=getattr(parameters, name),
value_range=(metadata["low"], metadata["high"]),
parent_instance=parameters,
)
assert field.type, f"No type available for field {field}"
if attr.has(field.type):
child_instance = getattr(parameters, name)
for param in enumerate_randomizable_params(child_instance):
yield param
class EnvParameterRandomizer(Randomizer[PType]):
"""
    Randomizer which randomizes environment parameters that are
    used to initialize the environment and simulation. This randomizer
will be invoked once per environment reset.
"""
VALUE_TYPE_TO_PARAMETER_TYPE: Dict[type, Type[RandomizerParameter]] = {
int: IntRandomizerParameter,
float: FloatRandomizerParameter,
}
def __init__(self, parameters: PType):
super().__init__("parameters")
for param in enumerate_randomizable_params(parameters):
randomizer_parameter_type = self.VALUE_TYPE_TO_PARAMETER_TYPE[
param.value_type
]
self.register_parameter(
randomizer_parameter_type(param.name, param.default, param.value_range)
)
def _randomize(self, target: PType, random_state: np.random.RandomState):
for param in enumerate_randomizable_params(target):
setattr(
param.parent_instance,
param.name,
self.get_parameter(param.name).get_value(),
)
return target
class EnvObservationRandomizer(ChainedRandomizer[dict, ObservationRandomizer]):
def __init__(self, randomizers: List[ObservationRandomizer]):
super().__init__("observation", randomizers)
class EnvActionRandomizer(ChainedRandomizer[dict, ActionRandomizer]):
def __init__(self, randomizers: List[ActionRandomizer]):
super().__init__("action", randomizers)
class EnvSimulationRandomizer(ChainedRandomizer[dict, SimulationRandomizer]):
def __init__(self, randomizers: List[SimulationRandomizer]):
super().__init__("sim", randomizers)
RType = TypeVar("RType")
class EnvRandomization(RandomizerCollection[Randomizer], Generic[PType]):
"""
Top level object which contains all randomizers for the environment.
This class provides the interface for a Domain Randomization (DR) framework
to interact with the environment and update its randomized parameters to new
values.
The top level flow is as below:
1. Domain Randomization call get_parameters to get all randomized
env parameters.
2. Domain Randomization calculates new value for the randomized env parameters.
3. Domain Randomization calls parameter.set_value to update randomized
env parameter value.
Parameter can be defined in jsonnet domain element with the following schema:
domain_elements_configs: {
<parameter_path>: {
args : <parameter_args>
},
}
where parameter path is defined as <dot separated list of randomizer chain>:<parameter name>
e.g.
parameters:num_objects # num_objects parameter for parameters randomizer.
observation.observation_delay:mean # mean parameter for observation_delay randomizer under
observation randomizer.
sim.gravity:value # value parameter for gravity randomizer under simulation randomizer.
"""
def __init__(
self,
*,
parameter_randomizer: EnvParameterRandomizer[PType],
observation_randomizer: EnvObservationRandomizer,
action_randomizer: EnvActionRandomizer,
simulation_randomizer: EnvSimulationRandomizer,
):
super().__init__()
self.parameter_randomizer = self.register_randomizer(parameter_randomizer)
self.observation_randomizer = self.register_randomizer(observation_randomizer)
self.action_randomizer = self.register_randomizer(action_randomizer)
self.simulation_randomizer = self.register_randomizer(simulation_randomizer)
def get_parameters(self):
"""
Get all randomization parameters for the environment.
"""
return self._get_randomizer_parameters()
    def get_parameter(self, path: str) -> RandomizerParameter:
        """
        Get a parameter by its dotted path (see the class docstring for the schema).
        """
parts = path.split(":")
assert len(parts) == 2, f"Invalid parameter path {path}."
path, param_name = parts
randomizer_names = path.split(".")
parent: Union[RandomizerCollection, Randomizer] = self
for name in randomizer_names:
assert isinstance(
parent, RandomizerCollection
), f"{name} of randomizer path {path} is not a randomizer collection."
parent = parent.get_randomizer(name)
assert isinstance(parent, Randomizer)
return parent.get_parameter(param_name)
def update_parameter(
self, path: str, value: VType,
):
parameter = self.get_parameter(path)
parameter.set_value(value)
def reset(self):
"""
Reset randomizer state. Will be called during environment reset.
"""
for randomizer in self.get_randomizers():
randomizer.reset()
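def _example_update_parameter(randomization: EnvRandomization):
    # Added usage sketch, not part of the original module: resolves and
    # updates a parameter via the dotted path schema documented above. The
    # path assumes a "gravity" simulation randomizer exposing a "value"
    # parameter is registered (see GravityRandomizer in sim.py).
    param = randomization.get_parameter("sim.gravity:value")
    low, high = param.get_range()
    randomization.update_parameter("sim.gravity:value", (low + high) / 2.0)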
| 7,963 | 29.281369 | 96 | py |
robogym | robogym-master/robogym/randomization/tests/test_sim_randomization.py | import numpy as np
from robogym.envs.rearrange.blocks import BlockRearrangeEnv
from robogym.randomization.sim import (
GenericSimRandomizer,
GeomSolimpRandomizer,
GeomSolrefRandomizer,
GravityRandomizer,
JointMarginRandomizer,
PidRandomizer,
)
class TestEnv(BlockRearrangeEnv):
@classmethod
def build_simulation_randomizers(cls, constants):
return [
GravityRandomizer(),
JointMarginRandomizer(),
PidRandomizer("pid_kp"),
GenericSimRandomizer(
name="dof_frictionloss_robot",
field_name="dof_frictionloss",
dof_jnt_prefix="robot0:",
apply_mode="uncoupled_mean_variance",
),
GeomSolimpRandomizer(),
GeomSolrefRandomizer(),
]
def test_sim_randomization():
def get_mujoco_values(mj_sim):
return [
value_getter(mj_sim).copy()
for value_getter in [
lambda sim: sim.model.opt.gravity,
lambda sim: sim.model.jnt_margin,
lambda sim: sim.model.actuator_gainprm,
lambda sim: sim.model.dof_frictionloss,
lambda sim: sim.model.geom_solref,
lambda sim: sim.model.geom_solimp,
]
]
env = TestEnv.build()
env.reset()
original_values = get_mujoco_values(env.sim)
parameters = env.unwrapped.randomization.simulation_randomizer.get_parameters()
initial_param_values = [param.get_value() for param in parameters]
# Update parameters.
for param in parameters:
low, high = param.get_range()
param.set_value(np.random.uniform(low, high))
for _ in range(3):
env.reset()
new_values = get_mujoco_values(env.sim)
for original_value, new_value in zip(original_values, new_values):
assert not np.allclose(original_value, new_value)
# Reset parameter back to original values.
for param, initial_value in zip(parameters, initial_param_values):
param.set_value(initial_value)
env.reset()
new_values = get_mujoco_values(env.sim)
# Make sure parameter value doesn't drift away.
for original_value, new_value in zip(original_values, new_values):
assert np.allclose(original_value, new_value)
| 2,343 | 29.051282 | 83 | py |
robogym | robogym-master/robogym/randomization/tests/test_randomization.py | import unittest
import attr
import numpy as np
from robogym.randomization.env import (
EnvActionRandomizer,
EnvObservationRandomizer,
EnvParameterRandomizer,
EnvRandomization,
EnvSimulationRandomizer,
build_randomizable_param,
)
from robogym.randomization.observation import ObservationRandomizer
from robogym.randomization.parameters import FloatRandomizerParameter
class DummyRandomizerParameter(FloatRandomizerParameter):
def __init__(self, name, val):
super().__init__(
name, val, value_range=(-1.0, 1.0), delta=1.0,
)
@attr.s(auto_attribs=True)
class DummyNestedEnvParameter:
c: int = build_randomizable_param(1, low=-3, high=3)
@attr.s(auto_attribs=True)
class DummyEnvParameter:
a: int = build_randomizable_param(0, low=-5, high=5)
b: float = build_randomizable_param(0.0, low=-1.0, high=1.0)
x: int = 0 # Non randomizable parameter.
nested: DummyNestedEnvParameter = DummyNestedEnvParameter()
class DummyObservationRandomizer(ObservationRandomizer):
def __init__(self, name, val):
super().__init__(name)
self.val = self.register_parameter(val)
def _randomize(self, target, random_state):
target[self.val.name] = self.val.get_value()
return target
class TestRandomization(unittest.TestCase):
def setUp(self):
super().setUp()
self.random_state = np.random.RandomState()
def test_randomizer_parameters(self):
parameter = DummyRandomizerParameter("foo", 0.0)
assert parameter.get_value() == 0.0
assert parameter.get_range() == (-1.0, 1.0)
assert parameter.get_delta() == 1.0
parameter.set_value(1.0)
assert parameter.get_value() == 1.0
def test_randomizer_basic(self):
"""
Test functionality of basic randomizer.
"""
randomizer = EnvParameterRandomizer(DummyEnvParameter())
assert len(randomizer.get_parameters()) == 3
# Make sure register duplicate parameter is not allowed.
with self.assertRaises(AssertionError):
randomizer.register_parameter(DummyRandomizerParameter("a", 1))
randomizer.register_parameter(DummyRandomizerParameter("d", 1))
assert len(randomizer.get_parameters()) == 4
randomizer.get_parameter("a").set_value(1)
randomizer.get_parameter("b").set_value(0.5)
randomizer.get_parameter("c").set_value(2)
parameters = randomizer.randomize(DummyEnvParameter(), self.random_state)
assert parameters.a == 1
assert parameters.b == 0.5
assert parameters.nested.c == 2
randomizer.disable()
parameters = randomizer.randomize(DummyEnvParameter(), self.random_state)
randomizer.get_parameter("a").set_value(1)
assert parameters.a == 0
def test_observation_randomizer(self):
randomizer = EnvObservationRandomizer(
[
DummyObservationRandomizer("r1", DummyRandomizerParameter("foo", 0.0)),
DummyObservationRandomizer("r2", DummyRandomizerParameter("bar", 1.0)),
]
)
assert len(randomizer.get_randomizers()) == 2
assert len(randomizer.get_parameters()) == 2
obs = randomizer.randomize({}, self.random_state)
assert obs["foo"] == 0.0
assert obs["bar"] == 1.0
def test_env_randomization(self):
randomization = EnvRandomization(
parameter_randomizer=EnvParameterRandomizer(DummyEnvParameter()),
observation_randomizer=EnvObservationRandomizer(
[
DummyObservationRandomizer(
"r1", DummyRandomizerParameter("foo", 0.0)
),
]
),
action_randomizer=EnvActionRandomizer([]),
simulation_randomizer=EnvSimulationRandomizer([]),
)
randomization.update_parameter("observation.r1:foo", 0.5)
parameter = randomization.get_parameter("observation.r1:foo")
assert parameter.get_value() == 0.5
| 4,093 | 30.984375 | 87 | py |
cad.js | cad.js-master/scripts/tyson.py | # Copyright (C) 2011-2012 Alexander Shorin
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Customized by Gabor Pap
# 2014
import logging
import sys
from math import isinf, isnan
from decimal import Decimal
from struct import pack, unpack
version = '.'.join(map(str, sys.version_info[:2]))
if version >= '3.0':
from io import BytesIO
basestring = (str, bytes)
unicode = str
bytes = bytes
long = int
xrange = range
d = {}
dict_keysiterator = type(d.keys())
dict_valuesiterator = type(d.values())
dict_itemsiterator = type(d.items())
else:
from cStringIO import StringIO as BytesIO
basestring = basestring
unicode = unicode
b = bytes = str
long = long
xrange = xrange
d = {}
dict_keysiterator = type(d.iterkeys())
dict_valuesiterator = type(d.itervalues())
dict_itemsiterator = type(d.iteritems())
b = lambda s: isinstance(s, unicode) and s.encode('latin1') or s
u = lambda s: isinstance(s, bytes) and s.decode('utf-8') or s
XRangeType = type(xrange(0))
LOG = logging.getLogger(__name__)
NOOP_SENTINEL = type('NoOp', (object,), {'__slots__': ()})()
MIXED = b('M')
NOOP = b('N')
EOS = b('E')
NULL = b('Z')
FALSE = b('F')
TRUE = b('T')
INT8 = b('B')
INT16 = b('i')
INT32 = b('I')
INT64 = b('L')
FLOAT = b('d')
DOUBLE = b('D')
STRING_S = b('s')
STRING_L = b('S')
HIDEF_S = b('h')
HIDEF_L = b('H')
ARRAY_S = b('a')
OBJECT_S = b('o')
ARRAY_L = b('A')
OBJECT_L = b('O')
FF = b(chr(255))
BOS_A = object()
BOS_O = object()
CONSTANTS = set([NOOP, EOS, NULL, FALSE, TRUE])
NUMBERS = set([INT8, INT16, INT32, INT64, FLOAT, DOUBLE])
STRINGS = set([STRING_S, STRING_L, HIDEF_S, HIDEF_L])
SHORT_OBJ = set([STRING_S, HIDEF_S, ARRAY_S, OBJECT_S])
LARGE_OBJ = set([STRING_L, HIDEF_L, ARRAY_L, OBJECT_L])
STREAMS = set([ARRAY_S, OBJECT_S])
OBJECT_KEYS = set([STRING_S, STRING_L])
FORBIDDEN = set([NOOP, EOS])
CHARS = dict((i, b(chr(i))) for i in range(256))
class DecodeError(ValueError):
"""UBJSON data decoding error."""
class MarkerError(DecodeError):
"""Raises if unknown or invalid marker was found in decoded data stream."""
class EarlyEndOfStreamError(DecodeError):
"""Raises when data stream unexpectedly ends."""
class EncodeError(TypeError):
"""Python object encoding error."""
class TysonDecoder(object):
# TODO: adjust the decoder to understand the tyson format, now it does
# DRAFT 8 ubjson decoding
"""Decoder of UBJSON data to Python object following Draft 8 specification
    and using the following data mapping:
+--------+----------------------------+----------------------------+-------+
| Marker | UBJSON type | Python type | Notes |
+========+============================+============================+=======+
| ``N`` | noop | :const:`~simpleubjson.NOOP`| \(1) |
+--------+----------------------------+----------------------------+-------+
| ``Z`` | null | None | |
+--------+----------------------------+----------------------------+-------+
| ``F`` | false | bool | |
+--------+----------------------------+----------------------------+-------+
| ``T`` | true | bool | |
+--------+----------------------------+----------------------------+-------+
| ``B`` | byte | int | |
+--------+----------------------------+----------------------------+-------+
| ``i`` | int16 | int | |
+--------+----------------------------+----------------------------+-------+
| ``I`` | int32 | int | |
+--------+----------------------------+----------------------------+-------+
| ``L`` | int64 | long | |
+--------+----------------------------+----------------------------+-------+
| ``d`` | float | float | |
+--------+----------------------------+----------------------------+-------+
| ``D`` | double | float | |
+--------+----------------------------+----------------------------+-------+
| ``h`` | hugeint - 2 bytes | decimal.Decimal | |
+--------+----------------------------+----------------------------+-------+
| ``H`` | hugeint - 5 bytes | decimal.Decimal | |
+--------+----------------------------+----------------------------+-------+
| ``s`` | string - 2 bytes | unicode | |
+--------+----------------------------+----------------------------+-------+
| ``S`` | string - 5 bytes | unicode | |
+--------+----------------------------+----------------------------+-------+
| ``a`` | array - 2 bytes | list | |
+--------+----------------------------+----------------------------+-------+
| ``a`` | array - unsized | generator | \(2) |
+--------+----------------------------+----------------------------+-------+
| ``A`` | array - 5 bytes | list | |
+--------+----------------------------+----------------------------+-------+
| ``o`` | object - 2 bytes | dict | |
+--------+----------------------------+----------------------------+-------+
| ``o`` | object - unsized | generator | \(3) |
+--------+----------------------------+----------------------------+-------+
| ``O`` | object - 5 bytes | dict | |
+--------+----------------------------+----------------------------+-------+
Notes:
(1)
        `NoOp` values are ignored by default unless the `allow_noop`
        argument is passed as ``True``.
(2)
Nested generators are automatically converted to lists.
(3)
Unsized objects are represented as list of 2-element tuple with object
key and value.
"""
dispatch = {}
def __init__(self, source, allow_noop=False):
if isinstance(source, unicode):
source = source.encode('utf-8')
if isinstance(source, bytes):
source = BytesIO(source)
self.read = source.read
self.allow_noop = allow_noop
self.dispatch = self.dispatch.copy()
def __iter__(self):
return self
def next_tlv(self):
tag = self.read(1)
while tag == NOOP and not self.allow_noop:
tag = self.read(1)
if tag in NUMBERS:
if tag == INT8:
# Trivial operations for trivial cases saves a lot of time
value = ord(self.read(1))
                if value >= 128:
value -= 256
#value, = unpack('>b', self.read(1))
elif tag == INT16:
value, = unpack('>h', self.read(2))
elif tag == INT32:
value, = unpack('>i', self.read(4))
elif tag == INT64:
value, = unpack('>q', self.read(8))
elif tag == FLOAT:
value, = unpack('>f', self.read(4))
elif tag == DOUBLE:
value, = unpack('>d', self.read(8))
else:
raise MarkerError('tag %r not in NUMBERS %r' % (tag, NUMBERS))
return tag, None, value
elif tag in SHORT_OBJ:
length = ord(self.read(1))
if tag in STRINGS:
if length == 255:
raise MarkerError(
'Short string objects (%r) should not have length 255'
% tag)
return tag, length, self.read(length)
return tag, length, None
elif tag in LARGE_OBJ:
length, = unpack('>I', self.read(4))
if tag in STRINGS:
return tag, length, self.read(length)
return tag, length, None
elif tag in CONSTANTS:
return tag, None, None
elif not tag:
raise EarlyEndOfStreamError('nothing to decode')
else:
raise MarkerError('invalid marker 0x%02x (%r)' % (ord(tag), tag))
def decode_next(self):
tag, length, value = self.next_tlv()
return self.dispatch[tag](self, tag, length, value)
__next__ = next = decode_next
def decode_noop(self, tag, length, value):
return NOOP_SENTINEL
dispatch[NOOP] = decode_noop
def decode_none(self, tag, length, value):
return None
dispatch[NULL] = decode_none
def decode_false(self, tag, length, value):
return False
dispatch[FALSE] = decode_false
def decode_true(self, tag, length, value):
return True
dispatch[TRUE] = decode_true
def decode_int(self, tag, length, value):
return value
dispatch[INT8] = decode_int
dispatch[INT16] = decode_int
dispatch[INT32] = decode_int
dispatch[INT64] = decode_int
def decode_float(self, tag, length, value):
return value
dispatch[FLOAT] = decode_float
dispatch[DOUBLE] = decode_float
def decode_string(self, tag, length, value):
return value.decode('utf-8')
dispatch[STRING_S] = decode_string
dispatch[STRING_L] = decode_string
def decode_hidef(self, tag, length, value):
return Decimal(value.decode('utf-8'))
dispatch[HIDEF_S] = decode_hidef
dispatch[HIDEF_L] = decode_hidef
def decode_array(self, tag, length, value):
if tag == ARRAY_S and length == 255:
return self.decode_array_stream(tag, length, value)
res = [None] * length
next_tlv = self.next_tlv
dispatch = self.dispatch
forbidden = FORBIDDEN
streams = STREAMS
        for i in range(length):
            tag, length, value = next_tlv()
            if tag in forbidden:
                raise MarkerError('invalid marker occurs: %02X' % ord(tag))
            item = dispatch[tag](self, tag, length, value)
            if tag in streams and length == 255:
                item = list(item)
            res[i] = item
return res
dispatch[ARRAY_S] = decode_array
dispatch[ARRAY_L] = decode_array
def decode_object(self, tag, length, value):
if tag == OBJECT_S and length == 255:
return self.decode_object_stream(tag, length, value)
res = {}
key = None
next_tlv = self.next_tlv
dispatch = self.dispatch
forbidden = FORBIDDEN
object_keys = OBJECT_KEYS
streams = STREAMS
for _ in range(length * 2):
tag, length, value = next_tlv()
if tag in forbidden:
raise MarkerError('invalid marker found: %02X' % ord(tag))
if key is None and tag not in object_keys:
raise MarkerError('key should be string, got %r' % (tag))
value = dispatch[tag](self, tag, length, value)
if key is None:
key = value
else:
if tag in streams and length == 255:
value = list(value)
res[key] = value
key = None
return res
dispatch[OBJECT_S] = decode_object
dispatch[OBJECT_L] = decode_object
def decode_array_stream(self, tag, length, value):
dispatch = self.dispatch
next_tlv = self.next_tlv
eos = EOS
streams = STREAMS
def array_stream():
while 1:
tag, length, value = next_tlv()
if tag == eos:
break
item = dispatch[tag](self, tag, length, value)
if tag in streams and length == 255:
yield list(item)
else:
yield item
return array_stream()
def decode_object_stream(self, tag, length, value):
dispatch = self.dispatch
next_tlv = self.next_tlv
eos = EOS
object_keys = OBJECT_KEYS
noop = NOOP
noop_sentinel = NOOP_SENTINEL
streams = STREAMS
def object_stream():
key = None
while 1:
tag, length, value = next_tlv()
if tag == noop and key is None:
yield noop_sentinel, noop_sentinel
                elif tag == noop and key:
continue
elif tag == eos:
if key:
raise EarlyEndOfStreamError('value missed for key %r'
% key)
break
elif key is None and tag not in object_keys:
raise MarkerError('key should be string, got %r' % (tag))
else:
value = dispatch[tag](self, tag, length, value)
if key is None:
key = value
elif tag in streams:
yield key, list(value)
key = None
else:
yield key, value
key = None
return object_stream()
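# Illustrative usage sketch (not part of the original module): decoding a small
# hand-built Draft 8 payload with the decoder above. The byte layout (object
# marker, 1-byte pair count, short string key, int8 value) mirrors next_tlv().
def _decoder_usage_example():
    # {'foo': 7} encoded as: 'o' <count=1> 's' <len=3> 'foo' 'B' <7>
    data = OBJECT_S + CHARS[1] + STRING_S + CHARS[3] + b('foo') + INT8 + CHARS[7]
    return TysonDecoder(data).decode_next()  # -> {'foo': 7}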
class TysonEncoder(object):
"""Encoder of Python objects into UBJSON data following Draft 8
specification rules with next data mapping:
+-----------------------------+------------------------------------+-------+
| Python type | UBJSON type | Notes |
+=============================+====================================+=======+
| :const:`~simpleubjson.NOOP` | NoOp | |
+-----------------------------+------------------------------------+-------+
| :const:`None` | null | |
+-----------------------------+------------------------------------+-------+
| :class:`bool` | :const:`False` => false | |
| | :const:`True` => true | |
+-----------------------------+------------------------------------+-------+
| :class:`int`, | `integer` or `huge` | \(1) |
| :class:`long` | | |
+-----------------------------+------------------------------------+-------+
| :class:`float` | `float`, `null` or `huge` | \(2) |
+-----------------------------+------------------------------------+-------+
| :class:`str`, | string | \(3) |
| :class:`unicode` | | \(4) |
+-----------------------------+------------------------------------+-------+
| :class:`tuple`, | sized array | \(3) |
| :class:`list`, | | |
| :class:`set`, | | |
| :class:`frozenset`, | | |
+-----------------------------+------------------------------------+-------+
| :class:`generator`, | unsized array | |
| :class:`XRange` | | |
+-----------------------------+------------------------------------+-------+
| :class:`dict` | object | \(3) |
| | | \(5) |
+-----------------------------+------------------------------------+-------+
| :class:`dict_itemsiterator` | unsized object | \(5) |
+-----------------------------+------------------------------------+-------+
| :class:`decimal.Decimal` | hidef | |
+-----------------------------+------------------------------------+-------+
Notes:
(1)
Depending on value it may be encoded into various UBJSON types:
* [-2^7, 2^7): ``int8``
* [-2^15, 2^15): ``int16``
* [-2^31, 2^31): ``int32``
* [-2^63, 2^63): ``int64``
* everything bigger/smaller: ``huge``
(2)
Depending on value it may be encoded into various UBJSON types:
* 1.18e-38 <= abs(value) <= 3.4e38: ``float``
* 2.23e-308 <= abs(value) < 1.8e308: ``double``
* :const:`inf`, :const:`-inf`: ``null``
* everything bigger/smaller: ``huge``
(3)
Depending on object length short or long version of UBJSON type may be
produced.
(4)
        Unicode strings are encoded with the utf-8 charset. Byte strings are
        required to be valid `utf-8` or :exc:`simpleubjson.EncodeError`
        will be raised.
(5)
Dict keys should have string type or :exc:`simpleubjson.EncodeError`
will be raised.
    Customization: when an integer array is encountered, the encoder derives
    the element type from the largest absolute value; if that fails for any
    reason, it falls back to encoding the whole sequence as MIXED. This works
    well when the numbers in the array mostly fall into the same range.
"""
dispatch = {}
def __init__(self, default=None):
self._default = default or self.default
def default(self, obj):
raise EncodeError('unable to encode %r' % obj)
@classmethod
def encode(cls, data, output=None):
"""Encodes Python object to Universal Binary JSON data.
:param data: Python object.
:param output: `.write([data])`-able object. If omitted result would be
returned instead of written into.
:return: Encoded Python object. See mapping table below.
If `output` param is specified, all data would be written into it
by chunks and None will be returned.
"""
res = TysonEncoder(None).encode_next(data)
if output:
output.write(res)
else:
return res
def encode_next(self, obj):
tobj = type(obj)
if tobj in self.dispatch:
res = self.dispatch[tobj](self, obj)
else:
return self.encode_next(self._default(obj))
if isinstance(res, bytes):
return res
return bytes().join(res)
def encode_noop(self, obj):
return NOOP
dispatch[type(NOOP_SENTINEL)] = encode_noop
def encode_none(self, obj):
return NULL
dispatch[type(None)] = encode_none
def encode_bool(self, obj):
return TRUE if obj else FALSE
dispatch[bool] = encode_bool
def encode_int(self, obj, int_type=None):
if int_type:
if type(obj) not in [int, long]:
raise EncodeError('Not an integer: %r' % obj)
if int_type == INT8:
return CHARS[obj % 256]
elif int_type == INT16:
return pack('>h', obj)
elif int_type == INT32:
return pack('>i', obj)
elif int_type == INT64:
return pack('>q', obj)
if (-2 ** 7) <= obj <= (2 ** 7 - 1):
return INT8 + CHARS[obj % 256]
elif (-2 ** 15) <= obj <= (2 ** 15 - 1):
return INT16 + pack('>h', obj)
elif (-2 ** 31) <= obj <= (2 ** 31 - 1):
return INT32 + pack('>i', obj)
elif (-2 ** 63) <= obj <= (2 ** 63 - 1):
return INT64 + pack('>q', obj)
else:
return self.encode_decimal(Decimal(obj))
dispatch[int] = encode_int
dispatch[long] = encode_int
def encode_float(self, obj):
if 1.18e-38 <= abs(obj) <= 3.4e38:
return FLOAT + pack('>f', obj)
elif 2.23e-308 <= abs(obj) < 1.8e308:
return DOUBLE + pack('>d', obj)
elif isinf(obj) or isnan(obj):
return NULL
else:
return self.encode_decimal(Decimal(obj))
dispatch[float] = encode_float
def _encode_str(self, obj):
length = len(obj)
if length < 255:
return STRING_S + CHARS[length] + obj
else:
            # a large string carries a plain 4-byte length, matching the
            # decoder's LARGE_OBJ handling (no extra INT32 marker byte)
            return STRING_L + pack('>I', length) + obj
def encode_bytes(self, obj):
try:
obj.decode('utf-8')
except UnicodeDecodeError:
raise EncodeError('Invalid UTF-8 byte string: %r' % obj)
else:
return self._encode_str(obj)
dispatch[bytes] = encode_bytes
def encode_str(self, obj):
return self._encode_str(obj.encode('utf-8'))
dispatch[unicode] = encode_str
def encode_decimal(self, obj):
obj = unicode(obj).encode('utf-8')
length = len(obj)
if length < 255:
return HIDEF_S + CHARS[length] + obj
else:
return HIDEF_L + pack('>i', length) + obj
dispatch[Decimal] = encode_decimal
def encode_sequence(self, obj):
length = len(obj)
array_type = MIXED
try:
encoded_item = self.encode_next(max([abs(item) for item in obj]))
if encoded_item.startswith(INT8):
array_type = INT8
elif encoded_item.startswith(INT16):
array_type = INT16
elif encoded_item.startswith(INT32):
array_type = INT32
elif encoded_item.startswith(INT64):
array_type = INT64
        except Exception:
            # non-numeric elements
            array_type = MIXED
if array_type != MIXED:
try:
if length < 255:
return_value = ARRAY_S + array_type + CHARS[length]
else:
return_value = ARRAY_L + array_type + pack('>I', length)
for item in obj:
return_value += self.encode_int(item, array_type)
yield return_value
            except Exception:
                # the maximum was an integer, but the array is not homogeneous
                array_type = MIXED
if array_type == MIXED:
if length < 255:
yield ARRAY_S + array_type + CHARS[length]
else:
yield ARRAY_L + array_type + pack('>I', length)
for item in obj:
yield self.encode_next(item)
dispatch[tuple] = encode_sequence
dispatch[list] = encode_sequence
dispatch[set] = encode_sequence
dispatch[frozenset] = encode_sequence
def encode_dict(self, obj):
length = len(obj)
if length < 255:
yield OBJECT_S + CHARS[length]
else:
yield OBJECT_L + pack('>I', length)
for key, value in obj.items():
if isinstance(key, unicode):
yield self.encode_str(key)
elif isinstance(key, bytes):
yield self.encode_bytes(key)
else:
raise EncodeError('invalid object key %r' % key)
yield self.encode_next(value)
dispatch[dict] = encode_dict
def encode_generator(self, obj):
yield ARRAY_S + FF
for item in obj:
yield self.encode_next(item)
yield EOS
dispatch[xrange] = encode_generator
dispatch[type((i for i in ()))] = encode_generator
dispatch[dict_keysiterator] = encode_generator
dispatch[dict_valuesiterator] = encode_generator
def encode_dictitems(self, obj):
yield OBJECT_S + FF
for key, value in obj:
if isinstance(key, unicode):
yield self.encode_str(key)
elif isinstance(key, bytes):
yield self.encode_bytes(key)
else:
raise EncodeError('invalid object key %r' % key)
yield self.encode_next(value)
yield EOS
dispatch[dict_itemsiterator] = encode_dictitems
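# Illustrative round-trip sketch (not part of the original module). Flat dicts
# with string keys and scalar values use plain Draft 8 markers, so the Draft 8
# decoder above can read them back. Note that sequences do NOT round-trip yet:
# the customized encoder emits a type byte after the array marker, which the
# TODO-noted Draft 8 decoder does not understand.
def _roundtrip_example():
    payload = TysonEncoder.encode({'answer': 42, 'name': 'tyson'})
    return TysonDecoder(payload).decode_next()  # -> {'answer': 42, 'name': 'tyson'}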
| 25,604 | 37.046062 | 82 | py |
cad.js | cad.js-master/scripts/xmlToJson.py | #!/usr/bin/env python
# L. Howard Copyright @2014
# Convert a CAD model (per the STEPtools defined XML spec)
# into a JSON spec model
# Derived from Javascript version at
# https://github.com/ghemingway/cad.js/blob/master/scripts/xmlToJson.js
import argparse
from datetime import datetime
import json
import math
from multiprocessing import cpu_count, Process, Queue
from operator import itemgetter
import os
from os.path import join
import re
import sys
import time
import xml.etree.cElementTree as ET
from tyson import TysonEncoder
import logging
logging.basicConfig(
format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG)
LOG = logging.getLogger(__name__)
# defaults and constants
DEFAULT_COLOR = "7d7d7d"
IDENTITY_TRANSFORM = "1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1"
SHELL_REGEX = re.compile("shell_(.*?).json")
ALT_ENCODINGS = ['latin-1']
#------------------------------------------------------------------------------
CONFIG = {
'indexPoints': True,
'indexNormals': True,
'compressColors': True,
'roundPrecision': 2
}
def round_float(val, precision):
"""floating point rounder"""
if not precision:
return val
factor = math.pow(10, precision)
return int(round(val * factor))
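# Illustrative examples of the quantization above (with the default
# roundPrecision of 2, values are scaled by 10**2 and stored as integers):
#   round_float(1.23456, 2) -> 123
#   round_float(1.23456, 0) -> 1.23456  (precision 0 disables rounding)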
def parse_xml(path):
"""XML parsing with fail back to alternate encodings"""
encodings = [None] + ALT_ENCODINGS[:]
for e in encodings:
p = ET.XMLParser(encoding=e)
try:
return ET.parse(path, parser=p)
except ET.ParseError:
pass
return None
#------------------------------------------------------------------------------
def translate_index(doc, use_tyson):
"""Returns the full JSON"""
return {
'root': doc.attrib['root'],
'products': [translate_product(x) for x in doc.iter('product')],
'shapes': [translate_shape(x) for x in doc.iter('shape')],
'shells': [translate_shell(x) for x in doc.iter('shell')],
'annotations': [translate_annotation(x) for x in doc.iter('annotation')],
'useTyson': use_tyson
}
def translate_product(product):
"""Translates a product"""
data = {
'id': product.attrib['id'],
'step': product.attrib.get('step', ""),
'name': product.attrib['name']
}
# Add children, if there are any
if product.attrib.get('children'):
data['children'] = product.attrib['children'].split(" ")
# Add shapes, if there are any
if product.attrib.get('shape'):
data['shapes'] = product.attrib['shape'].split(" ")
return data
def set_transform(transform):
"""Sets a transform"""
return ("I" if transform == IDENTITY_TRANSFORM else
[float(x) for x in transform.split(" ")])
def translate_shape(shape):
"""Translates a shape"""
data = {
'id': shape.attrib['id'],
# "unit": shape.attrib['unit'],
}
data.update({x: [] for x in ('shells', 'annotations', 'children')})
for child in shape.iter('child'):
# Add children, if there are any
data['children'].append({
'ref': child.attrib['ref'],
'xform': set_transform(child.attrib['xform'])
})
# Add child annotations
if shape.attrib.get('annotation'):
data['annotations'] = shape.attrib['annotation'].split(" ")
# Terminal Shape JSON
if shape.attrib.get('shell'):
data['shells'] = shape.attrib['shell'].split(" ")
return data
def translate_annotation(annotation):
"""Translates an annotation"""
data = dict(id=annotation.attrib['id'])
if 'href' in annotation.attrib:
data['href'] = annotation.attrib['href'].replace("xml", "json")
else:
data['lines'] = []
for polyline in annotation.iter('polyline'):
points = []
for p in polyline.iter('p'):
points.extend([float(x) for x in p.attrib['l'].split(" ")])
data['lines'].append(points)
return data
#------------------------------------------------------------------------------
def make_index(data, ikey, ranger=None):
"""Create indexes, an abstraction of indexing functions in the original"""
if ranger is None:
ranger = xrange(len(data[ikey]))
indexes = data[ikey + "Index"] = []
values = data['values']
for i in ranger:
val = round_float(data[ikey][i], CONFIG['roundPrecision'])
if val not in values:
values[val] = len(values)
indexes.append(values[val])
del data[ikey]
# The following functions are named indexShellxxx in the original
# The renaming aligns them with settings in the indexing CONFIG
indexPoints = lambda d: make_index(d, 'points')
indexNormals = lambda d: make_index(d, 'normals')
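# Illustrative sketch (not part of the original script): with the default
# roundPrecision of 2, quantized values are deduplicated through the shared
# 'values' dict, leaving integer indexes on the shell.
def _make_index_example():
    data = {'points': [0.5, 0.5, 1.0], 'values': {}}
    indexPoints(data)
    # data == {'values': {50: 0, 100: 1}, 'pointsIndex': [0, 0, 1]}
    return data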
def compress_shell_colors(data):
"""Color compression"""
    num_tuples = len(data['colors']) // 3
data['colorsData'] = []
start = 0
last = [data['colors'][x] for x in xrange(3)]
# Short list comparison
arrays_identical = lambda a, b: all([a[x] == b[x] for x in xrange(3)])
# Compress the rest
for tupl in xrange(num_tuples):
index = tupl * 3
tmp = [data['colors'][index + x] for x in xrange(3)]
# Is this a new block?
if not arrays_identical(last, tmp):
data['colorsData'].append(dict(data=last, duration=tupl - start))
start = tupl
last = tmp
# append the final color block
data['colorsData'].append(dict(data=last, duration=num_tuples - start))
# remove the colors index
del data['colors']
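# Illustrative sketch of the run-length compression above (not part of the
# original script): two red vertices followed by a green one collapse into
# two {data, duration} blocks.
def _compress_colors_example():
    data = {'colors': [1, 0, 0, 1, 0, 0, 0, 1, 0]}
    compress_shell_colors(data)
    # data == {'colorsData': [{'data': [1, 0, 0], 'duration': 2},
    #                         {'data': [0, 1, 0], 'duration': 1}]}
    return data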
#------------------------------------------------------------------------------
def translate_shell(shell):
"""Translates a shell"""
if 'href' in shell.attrib:
# Do href here
return {
'id': shell.attrib['id'],
'size': int(shell.attrib['size']),
'bbox': [float(x) for x in shell.attrib['bbox'].split(" ")],
'href': shell.attrib['href'].replace("xml", "json")
}
else:
# Convert XML point/vert/color to new way
points = load_points(shell.iter("verts"))
default_color = parse_color(shell.attrib.get('color', DEFAULT_COLOR))
data = dict(id=shell.attrib['id'], size=0)
data.update({x: [] for x in ('points', 'normals', 'colors')})
for facet in shell.iter('facets'):
color = default_color
if 'color' in facet.attrib:
color = parse_color(facet.attrib['color'])
for f in facet.iter('f'):
# Get every vertex index and convert using points array
index_vals = f.attrib['v'].split(" ")
for i in range(3):
ival = int(index_vals[i]) * 3
data['points'].append(float(points[ival]))
data['points'].append(float(points[ival + 1]))
data['points'].append(float(points[ival + 2]))
# Get the vertex normals
norms = [x for x in f.iter('n')]
for i in range(3):
norm_coordinates = norms[i].attrib['d'].split(" ")
for j in range(3):
data['normals'].append(float(norm_coordinates[j]))
# Get the vertex colors
for i in range(3):
for c in ('r', 'g', 'b'):
data['colors'].append(color[c])
        data['size'] = len(data['points']) // 9
indexing = [x for x in CONFIG if x.startswith('index') and CONFIG[x]]
for i in indexing:
data['precision'] = CONFIG['roundPrecision']
if 'values' not in data:
data['values'] = {}
globals()[i](data)
if indexing:
sorted_vals = sorted(data['values'].items(), key=itemgetter(1))
            data['values'] = list(map(itemgetter(0), sorted_vals))
if CONFIG.get('compressColors'):
compress_shell_colors(data)
return data
def parse_color(hex_color):
"""Parse color values"""
cval = int(hex_color, 16)
x = lambda b: ((cval >> b) & 0xff) / 255.0
return {k: x(v) for k, v in dict(r=16, g=8, b=0).iteritems()}
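# Illustrative example (not part of the original script): a hex color unpacks
# to normalized float channels via the bit shifts above, e.g. pure red:
#   parse_color("ff0000") -> {'r': 1.0, 'g': 0.0, 'b': 0.0}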
def load_points(verts):
"""Load all of the point information"""
points = []
for vert in verts:
for v in vert:
points.extend(v.attrib['p'].split(" "))
return points
#------------------------------------------------------------------------------
class WorkerBase(Process):
"""Base class for Workers"""
def __init__(self, queue, exceptions):
Process.__init__(self)
self.queue = queue
self.exceptions = exceptions
def report_exception(self, job, reason):
"""Report a job exception"""
info = dict(reason=reason)
info.update(job)
self.exceptions.put(info)
def run(self):
raise NotImplementedError
class BatchWorker(WorkerBase):
"""Worker process for parallelized shell batching"""
def run(self):
"""Process jobs"""
while True:
job = self.queue.get()
if job is None:
break
# process shells
batch = {'shells': []}
indexed = CONFIG['indexPoints'] or CONFIG['indexNormals']
reindex = job['reindex'] and indexed
use_tyson = job['use_tyson']
if reindex:
batch['values'] = {}
for s in job['shells']:
try:
with open(join(job['path'], s)) as f:
shell = json.load(f)
sid = SHELL_REGEX.match(s).group(1)
shell['id'] = sid
if reindex:
imap = {}
for i, value in enumerate(shell['values']):
if value not in batch['values']:
batch['values'][value] = len(batch['values'])
imap[i] = batch['values'][value]
del shell['values']
for item in ('points', 'normals'):
idx = item + 'Index'
if idx in shell:
shell[idx] = [imap[x] for x in shell[idx]]
batch['shells'].insert(0, shell)
except Exception as e:
reason = "Error batching shell '{}': {}".format(s, e)
self.report_exception(job, reason)
continue
# transform values to list
if reindex:
sorted_v = sorted(batch['values'].items(), key=itemgetter(1))
                batch['values'] = list(map(itemgetter(0), sorted_v))
# write batch
            extension = ".tyson" if use_tyson else ".json"
out_path = join(job['path'], job['name'] + extension)
try:
with open(out_path, "w") as f:
if use_tyson:
TysonEncoder().encode(batch, f)
                        f.flush()
                        # Must add padding for now to avoid a chrome bug
                        f.seek(0, 2)
                        f_size = f.tell()
                        for i in range(0, 8 - (f_size % 8)):
                            f.write(chr(0))
else:
json.dump(batch, f)
except Exception as e:
reason = "Unable to output JSON '{}': {}.".format(out_path, e)
self.report_exception(job, reason)
class TranslationWorker(WorkerBase):
"""Worker process for parallelized translation"""
def run(self):
"""Process jobs"""
while True:
job = self.queue.get()
if job is None:
break
path = job['path']
tree = parse_xml(path)
if not tree:
reason = "Unable to parse XML file '{}'.".format(path)
self.report_exception(job, reason)
continue
root = tree.getroot()
try:
data = job['translator'](root)
except Exception as e:
reason = "Translation failure: '{}'.".format(e)
self.report_exception(job, reason)
continue
out_path = os.path.splitext(path)[0] + ".json"
try:
with open(out_path, "w") as f:
json.dump(data, f)
except Exception as e:
reason = "Unable to output JSON '{}': {}.".format(out_path, e)
self.report_exception(job, reason)
class XMLTranslator(object):
"""Translates STEP XML files to JSON"""
def __init__(self, batches=None, reindex=None, use_tyson=None):
self.batches = batches
self.reindex = reindex
self.use_tyson = use_tyson
self.parser = None
@staticmethod
def assign(batches, shell):
"""simple bin packing"""
name, size = shell
blist = batches.values()
best = min([x['total_size'] for x in blist])
selected = [x for x in blist if x['total_size'] == best][0]
selected['total_size'] += size
selected['shells'].append(name)
def get_batches(self, shells):
"""assign shells to batches, leveling by size"""
batches = {'batch%s' % i: {'total_size': 0, 'shells': []}
for i in xrange(self.batches)}
for shell in shells:
self.assign(batches, shell)
return batches
def batch_shells(self, xml_dir):
"""Generates batched shell files"""
is_shell = lambda x: SHELL_REGEX.match(x)
size_of = lambda x: os.path.getsize(join(xml_dir, x))
shells = [(x, size_of(x)) for x in os.listdir(xml_dir) if is_shell(x)]
shells.sort(key=itemgetter(1), reverse=True)
batches = self.get_batches(shells)
# start workers and queue jobs
queue = Queue()
exceptions = Queue()
count = min(cpu_count(), self.batches)
workers = [BatchWorker(queue, exceptions) for _ in xrange(count)]
for w in workers:
w.start()
# enqueue jobs
for batch, info in batches.items():
job = {'path': xml_dir, 'name': batch, 'shells': info['shells'],
'reindex': self.reindex,
'use_tyson': self.use_tyson}
queue.put(job)
# add worker termination cues
for _ in workers:
queue.put(None)
# wait for completion
while any([x.is_alive() for x in workers]):
time.sleep(1)
# report errors, if any
has_errors = not exceptions.empty()
while not exceptions.empty():
info = exceptions.get()
msg = "Error processing '{}': {}"
LOG.error(msg.format(info['path'], info['reason']))
if not has_errors:
# report statistics
msg = "Batch sizes. Smallest: {} Largest: {} Avg: {}"
sz = [x['total_size'] for x in batches.values()]
LOG.debug(msg.format(min(sz), max(sz), sum(sz) / len(batches)))
c = [len(x['shells']) for x in batches.values()]
msg = "Batch shells. Smallest: {} Largest: {} Avg: {}"
LOG.debug(msg.format(min(c), max(c), sum(c) / len(batches)))
shells_size = sum([size for name, size in shells])
msg = "Shells. Count: {} Total Size: {} bytes."
LOG.debug(msg.format(len(shells), shells_size))
batch_extension = '.tyson' if self.use_tyson else '.json'
sizes = [size_of(x + batch_extension) for x in batches.keys()]
batches_size = sum(sizes)
msg = "Batches. Count: {} Total Size: {} bytes."
LOG.debug(msg.format(len(sizes), batches_size))
compression = float(batches_size) / float(shells_size)
LOG.debug("Compression: {}".format(compression))
return has_errors
def translate(self, xml_dir, xml_index):
"""Process index XML and enqueue jobs for workers"""
if not os.path.isdir(xml_dir):
LOG.error("'{}' is not a directory.".format(xml_dir))
return True
index_path = join(xml_dir, xml_index)
if not os.path.isfile(index_path):
LOG.error("Unable to locate index file '{}'.".format(index_path))
return True
tree = parse_xml(index_path)
if not tree:
LOG.error("Unable to parse index file '{}'.".format(index_path))
return True
root = tree.getroot()
try:
data = translate_index(root, self.use_tyson)
except Exception as e:
LOG.exception("Unable to translate index file.")
return True
pluck = lambda e, a: [x for x in data.get(e, []) if a in x]
external_shells = pluck('shells', 'href')
external_annotations = pluck('annotations', 'href')
index_out = join(xml_dir, os.path.splitext(xml_index)[0] + ".json")
LOG.debug("Writing new index file: " + index_out)
LOG.debug("\tProducts: %s" % len(data.get('projects', [])))
LOG.debug("\tShapes: %s" % len(data.get('shapes', [])))
LOG.debug("\tAnnotations: %s" % len(data.get('annotations', [])))
LOG.debug("\tExternal Annotations: %s" % len(external_annotations))
LOG.debug("\tShells: %s" % len(data.get('shells', [])))
num_shells = len(external_shells)
LOG.debug("\tExternal Shells: %s" % num_shells)
if self.batches and num_shells:
if num_shells < self.batches:
self.batches = 1
LOG.debug("\tBatches: %s" % self.batches)
data['batches'] = self.batches
else:
self.batches = 0
try:
with open(index_out, "w") as f:
json.dump(data, f)
except Exception as e:
LOG.exception("Unable to write JSON file '{}'.".format(index_out))
return True
# start workers and queue jobs
queue = Queue()
exceptions = Queue()
count = cpu_count()
workers = [TranslationWorker(queue, exceptions) for _ in xrange(count)]
for w in workers:
w.start()
xml_path = lambda p: join(xml_dir, os.path.splitext(p)[0] + ".xml")
for annotation in external_annotations:
queue.put({
'type': "annotation",
'path': xml_path(annotation['href']),
'translator': translate_annotation
})
for shell in external_shells:
queue.put({
'type': "shell",
'path': xml_path(shell['href']),
'translator': translate_shell
})
# add worker termination cues
for _ in workers:
queue.put(None)
# wait for completion
while any([x.is_alive() for x in workers]):
time.sleep(1)
# report errors, if any
has_errors = not exceptions.empty()
while not exceptions.empty():
info = exceptions.get()
msg = "Error processing '{}': {}"
LOG.error(msg.format(info['path'], info['reason']))
if has_errors or not self.batches:
return has_errors
return self.batch_shells(xml_dir)
#------------------------------------------------------------------------------
if __name__ == "__main__":
parser = argparse.ArgumentParser(
prog='xmlToJson.py',
description="Translates STEP XML to JSON")
parser.add_argument("dir", help="directory containing STEP XML")
parser.add_argument("index", help="index file")
h = "create batches of shells"
parser.add_argument("-b", "--batches", type=int, default=0, help=h)
h = "re-index when batching shells"
parser.add_argument("-r", "--reindex", action="store_true", help=h)
h = "output TySON instead of JSON"
parser.add_argument("-t", "--tyson", action="store_true", help=h)
args = parser.parse_args()
start_time = datetime.now()
translator = XMLTranslator(args.batches, args.reindex, args.tyson)
errors_in_translation = translator.translate(args.dir, args.index)
dt = datetime.now() - start_time
LOG.info("xmlToJson Elapsed time: {} secs".format(dt.seconds))
sys.exit(1 if errors_in_translation else 0)
| 20,459 | 34.957821 | 81 | py |
nat-acl2020 | nat-acl2020-master/main.py | from torch.optim.sgd import SGD
import os.path
import sys, csv, random, logging
import numpy as np
FIXED_RANDOM_SEEDS = False
if FIXED_RANDOM_SEEDS:
random.seed(0)
np.random.seed(0)
EXIT_SUCCESS=0
EXIT_FAILURE=-1
def evaluate(model_path, corpus, mini_batch_size=256, misspelling_rate=0.0,
cmx_file="", typos_file="", spell_check = None):
"""
Evaluates the model on the test set of the given corpus.
Appends the results to the eval.txt file in the model's directory.
Parameters:
model_path (str): path to the model to be evaluated
corpus (ColumnCorpus): loaded corpus
mini_batch_size (int): size of batches used by the evaluation function
misspelling_rate (float): misspelling rate (used in case of 'random' misspelling mode)
cmx_file (str): confusion matrix file (used in case of 'confusion matrix' misspelling mode)
typos_file (str): file with typos (used in case of 'typos' misspelling mode)
spell_check (HunSpell): spell checking module (optional)
"""
from robust_ner.enums import EvalMode, MisspellingMode
if cmx_file:
eval_mode = EvalMode.Misspellings
misspell_mode = MisspellingMode.ConfusionMatrixBased
elif typos_file:
eval_mode = EvalMode.Misspellings
misspell_mode = MisspellingMode.Typos
elif misspelling_rate > 0.0:
eval_mode = EvalMode.Misspellings
misspell_mode = MisspellingMode.Random
else:
eval_mode = EvalMode.Standard
misspell_mode = MisspellingMode.Random
# load the tagger model
from flair_ext.models import NATSequenceTagger
tagger = NATSequenceTagger.load(model_path)
eval_data = corpus.test
from robust_ner.noise import make_char_vocab
from robust_ner.confusion_matrix import load_confusion_matrix, filter_cmx
from robust_ner.typos import load_typos
char_vocab = make_char_vocab(eval_data)
cmx, lut, typos = None, {}, {}
# initialize resources used for evaluation
if misspell_mode == MisspellingMode.ConfusionMatrixBased:
cmx, lut = load_confusion_matrix(cmx_file)
cmx, lut = filter_cmx(cmx, lut, char_vocab)
elif misspell_mode == MisspellingMode.Typos:
typos = load_typos(typos_file, char_vocab, False)
# fixed parameters
num_workers = 8
# evaluate the model
result, loss = tagger.evaluate(eval_data, mini_batch_size, num_workers=num_workers,
eval_mode=eval_mode, misspell_mode=misspell_mode, misspelling_rate=misspelling_rate,
char_vocab=char_vocab, cmx=cmx, lut=lut, typos=typos, spell_check=spell_check)
# append the evaluation results to a file
model_dir = os.path.dirname(model_path)
eval_txt = os.path.join(model_dir, "eval.txt")
with open(eval_txt, "a") as f:
f.write(f"eval_mode: {eval_mode}\n")
f.write(f"spell_checking: {spell_check != None}\n")
if eval_mode == EvalMode.Misspellings:
f.write(f"misspell_mode: {misspell_mode}\n")
if misspell_mode == MisspellingMode.Random:
f.write(f"misspelling_rate: {misspelling_rate}\n")
elif misspell_mode == MisspellingMode.ConfusionMatrixBased:
f.write(f"cmx_file: {cmx_file}\n")
elif misspell_mode == MisspellingMode.Typos:
f.write(f"typos_file: {typos_file}\n")
f.write(f"Loss: {loss:.6} {result.detailed_results}\n")
f.write("-" * 100 + "\n")
log.info("'{}' function finished!".format(sys._getframe().f_code.co_name))
def train_tagger(model_dir, corpus, corpus_name, tag_type, embedding_type, train_mode, alpha=1.0,
misspelling_rate=0.0, cmx_file="", num_hidden=256, learning_rate=0.1, mini_batch_size=32,
max_epochs=100, train_with_dev=False, checkpoint=False, valid_with_misspellings=True):
"""
Trains a tagger model from scratch.
Parameters:
model_dir (str): output model path
corpus (ColumnCorpus): loaded corpus
corpus_name (str): name of the corpus used to load proper embeddings
tag_type (str): type of the tag to train
embedding_type (str): type of embeddings (e.g. flair, elmo, bert, word+char)
train_mode (TrainingMode): training mode
alpha (float): auxiliary loss weighting factor
misspelling_rate (float): misspelling rate (used in case of 'random' misspelling mode)
        cmx_file (str): confusion matrix file (used in case of 'confusion matrix' misspelling mode)
num_hidden (int): number of hidden layers of the tagger's LSTM
learning_rate (float): initial learning rate
mini_batch_size (int): the size of batches used by the evaluation function
max_epochs (int): maximum number of epochs to run
train_with_dev (bool): train using the development set
checkpoint (bool): save checkpoint files
valid_with_misspellings (bool): use validation with misspelling as additional measure
"""
# load embeddings
embeddings, embeddings_in_memory = init_embeddings(corpus_name, embedding_type=embedding_type)
# fixed parameters
use_crf = True
rnn_layers = 1
dropout, word_dropout, locked_dropout = 0.0, 0.05, 0.5
optimizer = SGD
# create the tagger model
from flair_ext.models import NATSequenceTagger
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
tagger: NATSequenceTagger = NATSequenceTagger(hidden_size=num_hidden, embeddings=embeddings,
tag_dictionary=tag_dictionary, tag_type=tag_type, use_crf=use_crf, use_rnn=rnn_layers>0,
rnn_layers=rnn_layers, dropout=dropout, word_dropout=word_dropout, locked_dropout=locked_dropout,
train_mode=train_mode, alpha=alpha, misspelling_rate=misspelling_rate, cmx_file=cmx_file)
# fixed parameters
anneal_factor = 0.5
patience = 3
anneal_with_restarts = False
num_workers = 8
# train the model
from flair_ext.trainers import ParameterizedModelTrainer
trainer: ParameterizedModelTrainer = ParameterizedModelTrainer(tagger, corpus, optimizer=optimizer, epoch=0, loss=10000.0)
trainer.train(model_dir, learning_rate=learning_rate, mini_batch_size=mini_batch_size, max_epochs=max_epochs,
anneal_factor=anneal_factor, patience=patience, train_with_dev=train_with_dev, monitor_train=False,
embeddings_in_memory=embeddings_in_memory, checkpoint=checkpoint, anneal_with_restarts=anneal_with_restarts,
shuffle=True, param_selection_mode=False, num_workers=num_workers, valid_with_misspellings=valid_with_misspellings)
plot_training_curves(model_dir)
log.info("'{}' function finished!".format(sys._getframe().f_code.co_name))
def fine_tune(model_dir, corpus, checkpoint_name, train_mode, alpha=1.0,
misspelling_rate = 0.0, cmx_file="", learning_rate=0.01, mini_batch_size=32, max_epochs=100,
train_with_dev=False, checkpoint=True, valid_with_misspellings=True):
"""
Fine-tunes an existing tagger model.
Parameters:
model_dir (str): output model path
        corpus (ColumnCorpus): loaded corpus
checkpoint_name (str): name of the checkpoint file
train_mode (TrainingMode): training mode
alpha (float): auxiliary loss weighting factor
misspelling_rate (float): misspelling rate (used in case of 'random' misspelling mode)
cmx_file (str): a confusion matrix file (used in case of 'confusion matrix' misspelling mode)
learning_rate (float): initial learning rate
mini_batch_size (int): the size of batches used by the evaluation function
max_epochs (int): maximum number of epochs to run
train_with_dev (bool): train using the development set
checkpoint (bool): save checkpoint files
valid_with_misspellings (bool): use validation with misspelling as additional measure
"""
checkpoint_path = os.path.join(model_dir, checkpoint_name)
# https://github.com/zalandoresearch/flair/issues/770
# from flair.models import NATSequenceTagger
from flair_ext.models import NATSequenceTagger
# load checkpoint file
    # avoid shadowing the boolean 'checkpoint' parameter with the loaded state
    loaded_checkpoint = NATSequenceTagger.load_checkpoint(checkpoint_path)
    loaded_checkpoint['epoch'] = 0
    loaded_checkpoint['model'].set_training_params(train_mode=train_mode, alpha=alpha, misspelling_rate=misspelling_rate, cmx_file=cmx_file)
# fixed parameters
optimizer = SGD
anneal_factor = 0.5
patience = 3
anneal_with_restarts = False
num_workers = 8
# train the model
from flair_ext.trainers import ParameterizedModelTrainer
    trainer: ParameterizedModelTrainer = ParameterizedModelTrainer.load_from_checkpoint(loaded_checkpoint, corpus, optimizer=optimizer)
trainer.train(model_dir, learning_rate=learning_rate, mini_batch_size=mini_batch_size, max_epochs=max_epochs,
anneal_factor=anneal_factor, patience=patience, train_with_dev=train_with_dev, monitor_train=False,
embeddings_in_memory=True, checkpoint=checkpoint, anneal_with_restarts=anneal_with_restarts,
shuffle=True, param_selection_mode=False, num_workers=num_workers, valid_with_misspellings=valid_with_misspellings)
plot_training_curves(model_dir)
log.info("'{}' function finished!".format(sys._getframe().f_code.co_name))
def init_embeddings(corpus_name, embedding_type):
"""
Initializes embeddings for a given corpus.
Parameters:
corpus_name (str): name of the corpus used to load proper embeddings
embedding_type (str): type of embeddings (e.g. flair, elmo, bert, word+char)
Returns:
tuple(StackedEmbeddings, bool): loaded embeddings
"""
from typing import List
    from flair.embeddings import TokenEmbeddings, WordEmbeddings, StackedEmbeddings
    from flair.embeddings import FlairEmbeddings
    from flair.embeddings import BertEmbeddings, ELMoEmbeddings
    from flair.embeddings import CharacterEmbeddings
embedding_types: List[TokenEmbeddings] = []
if corpus_name in ['conll03_en']:
if embedding_type == 'flair':
embedding_types.append(WordEmbeddings('glove'))
embedding_types.append(FlairEmbeddings('news-forward'))
embedding_types.append(FlairEmbeddings('news-backward'))
embeddings_in_memory = True
elif embedding_type == 'bert':
embedding_types.append(BertEmbeddings(bert_model_or_path='bert-base-cased'))
#embedding_types.append(BertEmbeddings(bert_model_or_path='bert-large-cased'))
embeddings_in_memory = True
elif embedding_type == 'elmo':
embedding_types.append(ELMoEmbeddings())
embeddings_in_memory = True
elif embedding_type == 'word+char':
# similar to Lample et al. (2016)
embedding_types.append(WordEmbeddings('glove'))
embedding_types.append(CharacterEmbeddings())
embeddings_in_memory = False # because it contains a char model (problem with deepcopy)
else:
log.error(f"no settings for '{embedding_type}'!")
exit(EXIT_FAILURE)
elif corpus_name in ["conll03_de", "germeval"]:
if embedding_type == 'flair':
embedding_types.append(WordEmbeddings('de'))
embedding_types.append(FlairEmbeddings('german-forward'))
embedding_types.append(FlairEmbeddings('german-backward'))
embeddings_in_memory = True
elif embedding_type == 'word+char':
# similar to Lample et al. (2016)
embedding_types.append(WordEmbeddings('de'))
embedding_types.append(CharacterEmbeddings())
embeddings_in_memory = False # because it contains a char model (problem with deepcopy)
else:
log.error(f"no settings for '{embedding_type}'!")
exit(EXIT_FAILURE)
else:
log.error(f"unknown corpus or embeddings '{corpus_name}'!")
exit(EXIT_FAILURE)
embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embedding_types)
log.info("'{}' function finished!".format(sys._getframe().f_code.co_name))
return embeddings, embeddings_in_memory
def load_corpus(corpus_name, col_idx, text_idx, tag_type='ner', downsample_perc=1.0,
name_train=None, name_dev=None, name_test=None, verbose=False):
"""
Loads a corpus with a given name.
Optionally performs downsampling of the data.
Parameters:
corpus_name (str): name of the corpus used to load proper embeddings
col_idx (int): index of the column's tag
text_idx (int): index of the text's tag
tag_type (str): type of the tag to load
        downsample_perc (float): downsample rate (1.0 = full corpus)
name_train (str): name of a file containing the train set
name_dev (str): name of a file containing the development set
name_test (str): name of a file containing the test set
Returns:
ColumnCorpus: the loaded corpus
"""
from pathlib import Path
data_dir = f'resources/tasks/'
if corpus_name in ["conll03_en"]:
from flair.datasets import CONLL_03
corpus = CONLL_03(base_path=Path(data_dir), tag_to_bioes=tag_type)
elif corpus_name in ["conll03_de"]:
from flair.datasets import CONLL_03_GERMAN
corpus = CONLL_03_GERMAN(base_path=Path(data_dir), tag_to_bioes=tag_type)
elif corpus_name in ["germeval"]:
from flair.datasets import GERMEVAL
corpus = GERMEVAL(base_path=Path(data_dir), tag_to_bioes=tag_type)
else:
corpus_dir = f"{data_dir}{corpus_name}"
if not os.path.exists(corpus_dir):
log.error(f"Data directory '{corpus_dir}' does not exists!")
exit(EXIT_FAILURE)
from flair.datasets import ColumnCorpus
columns = { text_idx: 'text', col_idx: tag_type }
train_set = None if name_train is None else f'{name_train}'
dev_set = None if name_dev is None else f'{name_dev}'
test_set = None if name_test is None else f'{name_test}'
corpus: ColumnCorpus = ColumnCorpus(corpus_dir, columns, train_file=train_set, test_file=test_set, dev_file=dev_set,
tag_to_bioes=tag_type)
if downsample_perc >= 0.0 and downsample_perc < 1.0:
corpus.downsample(downsample_perc)
if verbose:
log.info(corpus.obtain_statistics(tag_type=tag_type))
log.info("'{}' function finished!".format(sys._getframe().f_code.co_name))
return corpus
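# Illustrative usage sketch (column indexes follow the parse_args defaults):
#   corpus = load_corpus("conll03_en", col_idx=3, text_idx=0, tag_type="ner",
#                        downsample_perc=0.1, verbose=True)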
def plot_training_curves(model_dir):
"""
Plots training curves given the model directory.
Parameters:
model_dir (str): model's directory
"""
from flair_ext.visual.training_curves import Plotter
plotter = Plotter()
plotter.plot_training_curves('{}/loss.tsv'.format(model_dir))
plotter.plot_weights('{}/weights.txt'.format(model_dir))
log.info("'{}' function finished!".format(sys._getframe().f_code.co_name))
def parse_args():
"""
Parses command-line arguments.
Returns:
parsed arguments
"""
import argparse
from robust_ner.enums import TrainingMode
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--mode', dest='mode', type=str, help="execution mode",
choices=['train', 'tune', 'eval'], default='', required=True)
parser.add_argument('--corpus', dest='corpus', type=str, help="data set to use", default='', required=True)
parser.add_argument('--type', dest='embedding_type', type=str, help="embedding type",
choices=['flair', 'bert', 'word+char', 'elmo'], default='flair')
parser.add_argument('--model', dest='model', type=str, help="model path", default='', required=True)
parser.add_argument('--col_idx', dest='col_idx', type=int, help="ner tag column index", default=3)
parser.add_argument('--text_idx', dest='text_idx', type=int, help="text tag column index", default=0)
parser.add_argument('--device', dest='device', type=str, help="device to use", default='cuda')
parser.add_argument('--downsample', dest='downsample', type=float, help="downsample rate", default='1.0')
parser.add_argument('--checkpoint', dest='checkpoint', type=str, help="checkpoint file", default='best-model.pt')
parser.add_argument('--alpha', dest='alpha', type=float, help="auxiliary loss weight factor", default=1.0)
parser.add_argument('--misspelling_rate', dest='misspelling_rate', type=float,
help="misspellings rate used during training", default=0.0)
parser.add_argument('--train_mode', dest='train_mode', type=TrainingMode, help="training mode",
choices=list(TrainingMode), default=TrainingMode.Standard)
parser.add_argument('--verbose', dest='verbose', action='store_true', help="print verbose messages", default=False)
parser.add_argument('--num_hidden', dest='num_hidden', type=int, help="the number of hidden units of a tagger LSTM",
default=256)
parser.add_argument('--max_epochs', dest='max_epochs', type=int, help="max number of epochs to train", default=100)
parser.add_argument('--batch_size', dest='batch_size', type=int, help="mini batch size", default=32)
parser.add_argument('--lr', dest='learning_rate', type=float, help="initial learning rate", default=0.1)
parser.add_argument('--train_with_dev', dest='train_with_dev', action='store_true',
help="train using development data set", default=False)
parser.add_argument('--cmx_file', dest='cmx_file', type=str, help="confusion matrix file for training or evaluation",
default='')
parser.add_argument('--typos_file', dest='typos_file', type=str, help="typos file for evaluation", default='')
parser.add_argument('--spell_check', dest='spell_check', action='store_true',
help="use hunspell to automaticaly correct misspellings", default=False)
parser.add_argument('--no_valid_misspell', dest='no_valid_with_misspellings', action='store_true',
help="turns off the validation component that uses misspellings", default=False)
args = parser.parse_args()
log.info(args)
if args.device not in ['cpu', 'cuda', 'mkldnn', 'opengl', 'opencl', 'ideep', 'hip', 'msnpu']:
log.error("unknown args.device: '{}'".format(args.device))
exit(EXIT_FAILURE)
import torch, flair
if FIXED_RANDOM_SEEDS:
torch.manual_seed(0)
if args.device.startswith("cuda") and not torch.cuda.is_available():
args.device = "cpu"
flair.device = torch.device(args.device)
if args.col_idx < 0:
log.error("invalid args.col_idx: '{}'".format(args.col_idx))
exit(EXIT_FAILURE)
if not 0.0 < args.downsample <= 1.0:
log.error("invalud args.downsample: '{}'".format(args.downsample))
exit(EXIT_FAILURE)
if len(args.corpus) == 0:
log.error("invalid args.corpus: '{}'".format(args.corpus))
exit(EXIT_FAILURE)
log.info("'{}' function finished!".format(sys._getframe().f_code.co_name))
return args
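# Illustrative invocations (flag names follow parse_args above; model and
# corpus names are placeholders):
#   python main.py --mode train --corpus conll03_en --model my_tagger \
#       --type flair --train_mode augmentation --misspelling_rate 0.1
#   python main.py --mode eval --corpus conll03_en --model my_tagger \
#       --misspelling_rate 0.1 --spell_check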
if __name__ == "__main__":
logging.basicConfig()
logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)
current_directory = os.path.dirname(os.path.abspath(__file__))
# add the current directory to the system path to use functions from the robust_ner library
sys.path.append(current_directory)
# add sub-folder containint the flair library to the system path
sys.path.append(os.path.join(current_directory, "flair"))
# parse command-line arguments
args = parse_args()
model_name = args.model
# if the model_name is not an absolute path - assume it is placed in the 'resources/taggers' sub-directory
if os.path.isabs(model_name):
model_dir = model_name
else:
model_dir = os.path.join("resources/taggers", model_name)
# join the full model path
model_path = os.path.join(model_dir, args.checkpoint)
# if the given path does not exists, check whether it could be a built-in model
if not os.path.exists(model_path) and model_name in ['ner', 'de-ner']:
model_path = model_name
# load the corpus
tag_type = 'ner'
corpus = load_corpus(args.corpus, args.col_idx, args.text_idx, tag_type, args.downsample, verbose=args.verbose)
# optionaly, initialize the spell checker
if args.spell_check:
from robust_ner.spellcheck import init_spellchecker
spell_check = init_spellchecker(args.corpus)
else:
spell_check = None
print(f"Using '{spell_check}' spell checker")
if args.mode == 'train':
train_tagger(model_dir, corpus, args.corpus, tag_type, embedding_type=args.embedding_type,
train_mode=args.train_mode, alpha=args.alpha, misspelling_rate=args.misspelling_rate,
cmx_file=args.cmx_file, num_hidden=args.num_hidden, max_epochs=args.max_epochs,
learning_rate=args.learning_rate, train_with_dev=args.train_with_dev, mini_batch_size=args.batch_size,
valid_with_misspellings=not args.no_valid_with_misspellings)
elif args.mode == 'tune':
fine_tune(model_dir, corpus, args.checkpoint, train_mode=args.train_mode, alpha=args.alpha,
misspelling_rate=args.misspelling_rate, max_epochs=args.max_epochs, cmx_file=args.cmx_file,
learning_rate=args.learning_rate, train_with_dev=args.train_with_dev, mini_batch_size=args.batch_size,
valid_with_misspellings=not args.no_valid_with_misspellings)
elif args.mode == 'eval':
evaluate(model_path, corpus, misspelling_rate=args.misspelling_rate, cmx_file=args.cmx_file, typos_file=args.typos_file,
spell_check=spell_check)
else:
print("unknown mode")
exit(EXIT_FAILURE)
| 22,210 | 44.144309 | 133 | py |
nat-acl2020 | nat-acl2020-master/robust_ner/enums.py | from enum import Enum
class TrainingMode(Enum):
"""
Training mode (one of: standard, stability, augmentation)
"""
Standard = 'standard'
Stability = 'stability'
Augmentation = 'augmentation'
def __str__(self):
return self.name
class EvalMode(Enum):
"""
Evaluation mode (one of: standard, misspellings)
"""
Standard = 'standard'
Misspellings = 'misspellings'
def __str__(self):
return self.name
class MisspellingMode(Enum):
"""
Misspellings mode (one of: rand, cmx, typos)
"""
Random = 'rand'
ConfusionMatrixBased = 'cmx'
Typos = 'typos'
def __str__(self):
        return self.name
 | 686 | 17.078947 | 61 | py |
nat-acl2020 | nat-acl2020-master/robust_ner/embeddings.py | import logging
import torch
from typing import List
from flair.data import Sentence
log = logging.getLogger("flair")
def check_embeddings(sentList1: List[Sentence], sentList2: List[Sentence], embed1: torch.tensor, embed2: torch.tensor):
"""
Checks embeddings of the original and perturbed sentences.
    Returns False if any pair of corresponding tokens has identical
    embeddings but different text; returns True otherwise.
"""
for i, (s1, s2) in enumerate(zip(sentList1, sentList2)):
for j, (tok1, tok2) in enumerate(zip(s1, s2)):
text1, text2 = tok1.text, tok2.text
e1, e2 = embed1[i][j], embed2[i][j]
diff = torch.sum(e1 - e2).item()
if text1 != text2 and diff == 0.0:
log.error(
f"ERROR: same embeddings, different text! "
f"diff={diff} text1: {text1} text2: {text2}"
)
return False
    return True
 | 1,019 | 33 | 119 | py |
nat-acl2020 | nat-acl2020-master/robust_ner/noise.py | import math
import logging
import random
import numpy as np
from robust_ner.confusion_matrix import noise_sentences_cmx
from robust_ner.vanilla_noise import noise_sentences_vanilla
from robust_ner.typos import noise_sentences_typos
from robust_ner.enums import MisspellingMode
def make_char_vocab(sentences):
"""
Construct the character vocabulary from the given sentences
"""
char_vocab = set()
for sentence in sentences:
_update_char_vocab(sentence, char_vocab)
return char_vocab
def _update_char_vocab(sentence, char_vocab: set):
"""
Updates the character vocabulary using a single sentence
"""
for token in sentence:
if len(token.text) > 0:
char_vocab.update([s for s in set(token.text) if not s.isspace()])
def noise_sentences(sentences, misspell_mode, noise_level = 0.0, char_vocab = {}, cmx = None, lut = {}, typos = {}, verbose: bool = False):
"""
Induces noise on the given list of sentences
"""
if misspell_mode == MisspellingMode.ConfusionMatrixBased:
return noise_sentences_cmx(sentences, cmx, lut)
elif misspell_mode == MisspellingMode.Typos:
return noise_sentences_typos(sentences, typos, noise_level)
else:
return noise_sentences_vanilla(sentences, char_vocab, noise_level, verbose)
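# Illustrative usage sketch (assumes flair is installed; not part of the
# original module):
def _noise_example():
    from flair.data import Sentence
    sentences = [Sentence("John lives in Berlin")]
    char_vocab = make_char_vocab(sentences)
    # randomly perturb roughly 10% of the characters of each token
    return noise_sentences(sentences, MisspellingMode.Random,
                           noise_level=0.1, char_vocab=char_vocab)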
| 1,346 | 27.659574 | 139 | py |
nat-acl2020 | nat-acl2020-master/robust_ner/confusion_matrix.py | import os.path
import csv
import math
import logging
import random
import numpy as np
def load_confusion_matrix(cmx_file_name, separator=' '):
"""
Loads a confusion matrix from a given file.
    NULL - token that represents the epsilon character used to define
    the deletion and insertion operations.
WS - white-space character.
Rows represent original characters, column - perturbed characters.
Deletion of a character - NULL token in a column header (original->NULL)
Insertion of a character - NULL token in a row header (NULL->result)
ConfusionMatrix[NULL][NULL] == 0.0
File format:
- 1st row: vocabulary, e.g., VOCAB a b c ... x y z
- next |V| rows: rows of the confusion matrix, where V is vocabulary of characters
"""
log = logging.getLogger("flair")
# read input file (e.g., ocr.cmx)
source_path = os.path.join(f"resources/cmx/", f"{cmx_file_name}.cmx")
log.info(f"Confusion matrix path: {source_path}")
vocab = None
cmx = None # confusion matrix
with open(source_path, "r") as input_file:
reader = csv.reader(input_file, delimiter=separator)
for row in reader:
if len(row) == 0 or row[0].startswith('#'):
continue
# print(row)
if vocab is None:
row = [c[1:-1] for c in row if len(c) > 0] # strip first and last character
vocab = row
else:
row = [c for c in row if len(c) > 0]
cmx = np.array(row) if cmx is None else np.vstack([cmx, row])
    cmx = cmx.astype(np.float64)
lut = make_lut_from_vocab(vocab)
# remove rows and columns of some characters (e.g., WS)
to_delete = [c for c in ['WS'] if c in lut]
for c in to_delete:
cmx, lut, vocab = _remove_char_from_cmx(c, cmx, lut, vocab)
# np.set_printoptions(precision=4, floatmode='fixed', suppress=True)
# log.info(f"Vocabulary:\n{vocab}")
# log.info(f"LUT:\n{lut}")
# log.info(f"Confusion matrix:\n{cmx}")
# log.info(f"p(c -> b): {query_cmx('c', 'b', cmx, lut)}")
# log.info(f"p(NULL -> c): {query_cmx('NULL','c', cmx, lut)}")
cmx = _normalize_cmx(cmx)
return cmx, lut
def print_cmx(print_func, cmx, precision=2):
"""
Prints the confusion matrix with a given print function
"""
np.set_printoptions(precision=precision, floatmode='fixed', suppress=True)
print_func(cmx)
def query_cmx(orig_char, result_char, cmx, lut):
"""
Helper function for querying a value from the confusion matrix
"""
return cmx[lut[orig_char], lut[result_char]]
#return cmx[vocab.index('c'), vocab.index('b')]
def _normalize_cmx(cmx):
"""
Normalizes the rows of the confusion matrix, so that they form
valid probability distributions.
Assigns zero probability if all elements in a row are zero.
"""
cmx_row_sums = cmx.sum(axis=1)[:, np.newaxis]
cmx = np.divide(cmx, cmx_row_sums, out=np.zeros_like(cmx), where=cmx_row_sums!=0)
return cmx
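# Illustrative sketch of the normalization above (not part of the original
# module): count rows become probability distributions; all-zero rows stay zero.
def _normalize_cmx_example():
    counts = np.array([[2.0, 2.0], [0.0, 0.0]])
    return _normalize_cmx(counts)  # -> [[0.5, 0.5], [0.0, 0.0]]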
def filter_cmx(cmx, lut, corpus_vocab):
"""
Removes from the confusion matrix all characters that do not appear in the corpus
"""
log = logging.getLogger("flair")
# re-create vocabulary from LUT
cmx_vocab = make_vocab_from_lut(lut)
to_delete = [c for c in cmx_vocab if c not in corpus_vocab and c not in ['NULL']]
log.info(f"Characters to delete from the confusion matrix: {to_delete}")
# remove rows and columns of confusion matrix that do not appear in a given vocabulary
for c in to_delete:
cmx, lut, cmx_vocab =_remove_char_from_cmx(c, cmx, lut, cmx_vocab)
cmx = _normalize_cmx(cmx)
return cmx, lut
def _remove_char_from_cmx(c, cmx, lut, vocab):
"""
Removes a given character from the confusion matrix
"""
idx = lut.get(c, -1)
if idx >= 0:
# log.info(f"'{c}' removed from the confusion matrix.")
cmx = np.delete(cmx, (idx), axis=0) # delete row
cmx = np.delete(cmx, (idx), axis=1) # delete column
vocab.pop(idx)
# lut.pop(c, None)
# LUT must be re-calculated
lut = make_lut_from_vocab(vocab)
return cmx, lut, vocab
def make_vocab_from_lut(lut):
return [k for k,v in lut.items()]
def make_lut_from_vocab(vocab):
return { c:i for i, c in enumerate(vocab) }
def induce_noise_cmx(input_text, cmx, lut):
"""
Induces noise into the input text using the confusion matrix
"""
log = logging.getLogger("flair")
# re-create vocabulary from LUT
vocab = make_vocab_from_lut(lut)
# print(f"vocab={vocab}")
n_classes = len(lut)
input_chars, output_chars = list(input_text), []
cnt_modifications = 0
    # Iterate over 2 * len(input) + 1 interleaved positions: even indices are
    # insertion slots (treated as 'NULL'), odd indices are original characters.
    # Illustration for the word "item":
    #   _i_t_e_m_
    #   012345678
for i in range(len(input_chars) * 2 + 1):
input_char = input_chars[i // 2] if (i % 2 == 1) else 'NULL'
row_idx = lut.get(input_char, -1)
result_char = input_char
if row_idx >= 0:
prob = cmx[row_idx]
prob_sum = prob.sum()
if math.isclose(prob_sum, 1.0):
rand_idx = np.random.choice(n_classes, p=prob)
result_char = vocab[rand_idx]
else:
log.warning(f"Probabilities do not sum to 1 ({prob_sum}) for row_idx={row_idx} (input_char={input_char})!")
else:
            log.warning(f"LUT key for '{input_char}' does not exist!")
# print(f"{input_char} -> {result_char}")
if result_char != 'NULL':
output_chars.append(result_char)
if input_char != result_char:
# print(f"{input_char} -> {result_char}")
cnt_modifications += 1
output_text = "".join(output_chars)
if len(output_text) == 0:
output_text = input_text
cnt_modifications = 0
return output_text, cnt_modifications
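# Minimal usage sketch (the file name 'ocr' is illustrative; it assumes that
# resources/cmx/ocr.cmx exists):
#   cmx, lut = load_confusion_matrix("ocr")
#   noised_text, n_edits = induce_noise_cmx("item", cmx, lut)
# Each position is sampled independently from its row of the matrix, so
# repeated calls on the same input generally produce different outputs.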
def noise_sentences_cmx(sentences, cmx, lut):
"""
Induces noise on the list of sentences using the confusion matrix
"""
from copy import deepcopy
noised_sentences = deepcopy(sentences)
cnt_token_modifications = 0
for sentence in noised_sentences:
for token in sentence:
token.text, cnt_modif = induce_noise_cmx(token.text, cmx, lut)
if cnt_modif > 0:
cnt_token_modifications += 1
return noised_sentences, cnt_token_modifications
| 6,544 | 30.618357 | 127 | py |
nat-acl2020 | nat-acl2020-master/robust_ner/vanilla_noise.py | import math
import logging
import random
import numpy as np
from robust_ner.confusion_matrix import make_lut_from_vocab
def induce_noise_vanilla(input_text, char_vocab, noise_level):
"""
Induces noise into the input text using a vanilla noise model.
"""
log = logging.getLogger("flair")
vocab = list(char_vocab)
vocab.insert(len(vocab), "NULL")
# print(f"vocab={vocab}")
lut = make_lut_from_vocab(vocab)
n_classes = len(lut)
input_chars, output_chars = list(input_text), []
cnt_modifications, cnt_subst, cnt_ins, cnt_del = 0, 0, 0, 0
cnt_chars = len(input_chars)
weight_ins = cnt_chars / (cnt_chars + 1)
prob_change = noise_level / 3
row_idx_null = lut.get('NULL', -1)
    # Iterate over 2 * cnt_chars + 1 interleaved positions: even indices are
    # insertion slots (treated as 'NULL'), odd indices are original characters.
    # Illustration for the word "item":
    #   _i_t_e_m_
    #   012345678
for i in range(cnt_chars * 2 + 1):
input_char = input_chars[i // 2] if (i % 2 == 1) else 'NULL'
row_idx = lut.get(input_char, -1)
result_char = input_char
if row_idx >= 0:
# P(no_change) = 1.0 - noise_level
            # P(change) = noise_level, spread across all elements, except the 'no_change' element
if input_char == 'NULL':
prob_insert = prob_change * weight_ins
prob = np.full((n_classes), prob_insert / (n_classes - 1))
prob[row_idx] = 1 - prob_insert # no-change
else:
prob = np.full((n_classes), prob_change / (n_classes - 2))
prob[row_idx_null] = prob_change # prob-delete
prob[row_idx] = 1 - 2 * prob_change # no-change
prob_sum = prob.sum()
if math.isclose(prob_sum, 1.0):
rand_idx = np.random.choice(n_classes, p=prob)
result_char = vocab[rand_idx]
else:
log.warning(f"Probabilities do not sum to 1 ({prob_sum}) for row_idx={row_idx} (input_char={input_char})!")
else:
            log.warning(f"LUT key for '{input_char}' does not exist!")
# print(f"{input_char} -> {result_char}")
if result_char == 'NULL' and result_char != input_char:
cnt_del += 1
elif input_char == 'NULL' and result_char != input_char:
cnt_ins += 1
elif input_char != result_char:
cnt_subst += 1
if result_char != 'NULL':
output_chars.append(result_char)
if input_char != result_char:
cnt_modifications += 1
output_text = "".join(output_chars)
if len(output_text) == 0:
output_text = input_text
return output_text, cnt_modifications, cnt_subst, cnt_ins, cnt_del
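# Worked example of the probability budget above: with noise_level = 0.3,
# prob_change = 0.1. For a regular character, P(keep) = 1 - 2 * 0.1 = 0.8,
# P(delete) = 0.1, and the remaining 0.1 is spread uniformly over the
# n_classes - 2 substitution candidates. Insertion slots ('NULL' positions)
# are additionally down-weighted by cnt_chars / (cnt_chars + 1), since a text
# of length n offers n + 1 insertion points.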
def _noise_sentences_vanilla_verbose(sentences, char_vocab, noise_level):
"""
Induces noise on the given list of sentences with verbose logging
"""
log = logging.getLogger("flair")
from copy import deepcopy
noised_sentences = deepcopy(sentences)
cnt_chars = sum([len(token.text) for sentence in sentences for token in sentence], 0)
cnt_tokens = sum([len(sentence.tokens) for sentence in sentences], 0)
cnt_sentences = len(sentences)
cnt_char_modifications, cnt_token_modifications, cnt_sent_modifications = 0, 0, 0
for sentence in noised_sentences:
sent_modified = False
for token in sentence:
noised_text, cnt_modifications, _, _, _ = induce_noise_vanilla(token.text, char_vocab, noise_level)
# if verbose and cnt_modifications > 0:
# log.info("{0} -> {1} (cnt_modif: {2})".format(token.text, noised_text, cnt_modifications))
token.text = noised_text
if cnt_modifications > 0:
cnt_char_modifications += cnt_modifications
cnt_token_modifications += 1
sent_modified = True
if sent_modified:
cnt_sent_modifications += 1
SER = cnt_sent_modifications * 100.0 / cnt_sentences
TER = cnt_token_modifications * 100.0 / cnt_tokens
CER = cnt_char_modifications * 100.0 / cnt_chars
log.info(
f"SER:{SER:.1f}({cnt_sent_modifications}/{cnt_sentences}), "
f"TER:{TER:.1f}({cnt_token_modifications}/{cnt_tokens}), "
f"CER:{CER:.1f}({cnt_char_modifications}/{cnt_chars})")
# for i, sentence in enumerate(sentences):
# modified_sentence = noised_sentences[i]
# print("{} -> {}".format(sentence, modified_sentence))
return noised_sentences, cnt_token_modifications
def _noise_sentences_vanilla_quiet(sentences, char_vocab, noise_level):
"""
Induces noise on the given list of sentences without verbose logging
"""
from copy import deepcopy
noised_sentences = deepcopy(sentences)
cnt_noised_tokens = 0
for sentence in noised_sentences:
for token in sentence:
token.text, cnt_modif, _, _, _ = induce_noise_vanilla(token.text, char_vocab, noise_level)
if cnt_modif > 0:
cnt_noised_tokens += 1
return noised_sentences, cnt_noised_tokens
def noise_sentences_vanilla(sentences, char_vocab, noise_level, verbose: bool):
"""
Induces noise on the given list of sentences using the vanilla noise model
"""
if verbose:
return _noise_sentences_vanilla_verbose(sentences, char_vocab, noise_level)
else:
return _noise_sentences_vanilla_quiet(sentences, char_vocab, noise_level)
| 5,584 | 33.90625 | 127 | py |
nat-acl2020 | nat-acl2020-master/robust_ner/spellcheck.py | import hunspell
def init_spellchecker(corpus):
"""
Initializes the spell checker.
    It uses the corpus information to choose a proper language for the spell checker.
    Returns the initialized spell checker.
"""
if corpus in ["conll03_en", "ontonotes"]:
spell_check = hunspell.HunSpell('/usr/share/hunspell/en_US.dic', '/usr/share/hunspell/en_US.aff')
elif corpus in ["conll03_de", "germeval"]:
spell_check = hunspell.HunSpell('/usr/share/hunspell/de_DE.dic', '/usr/share/hunspell/de_DE.aff')
else:
spell_check = None
return spell_check
def correct_text(spellcheck, input):
"""
    Checks whether the input is correctly spelled and corrects it otherwise.
    Returns the corrected input.
"""
output = input
ok = spellcheck.spell(input)
if not ok:
suggestions = spellcheck.suggest(input)
if len(suggestions) > 0:
output = suggestions[0]
# print(f"{input} -> {output}")
return output
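# Minimal usage sketch (assumes the hunspell en_US dictionaries are installed
# under /usr/share/hunspell):
#   spell_check = init_spellchecker("conll03_en")
#   fixed = correct_text(spell_check, "becuase")
# The exact correction depends on the dictionary's suggestion order, e.g.
# "becuase" is expected to yield "because".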
def correct_sentences(spellcheck, sentences):
"""
Corrects all tokens in the given sentences using the given spell checker
Returns the corrected sentences
"""
from copy import deepcopy
corrected_sentences = deepcopy(sentences)
for sentence in corrected_sentences:
for token in sentence:
token.text = correct_text(spellcheck, token.text)
return corrected_sentences | 1,441 | 29.041667 | 113 | py |
nat-acl2020 | nat-acl2020-master/robust_ner/typos.py | import os.path
import logging
import random
import numpy as np
def load_typos(file_name, char_vocab = {}, filter_OOA_chars = False):
"""
Loads typos from a given file.
Optionally, filters all entries that contain out-of-alphabet characters.
"""
_, ext = os.path.splitext(file_name)
if ext == ".tsv":
typos = load_typos_moe(file_name)
else:
typos = load_typos_belinkov_bisk(file_name)
if filter_OOA_chars:
typos = _filter_typos(typos, char_vocab)
return typos
def load_typos_moe(file_name):
"""
Loads and returns a typos dictionary from a given file.
Designed for Misspelling Oblivious Word Embeddings (MOE):
https://github.com/facebookresearch/moe
"""
# log = logging.getLogger("robust_ner")
    file_path = os.path.join("resources/typos", file_name)
typos = dict()
for line in open(file_path):
line = line.strip().split()
if len(line) != 2:
#log.warning(f"len(line) = {len(line)} != 2 (line: {line})")
continue
value = line[0]
key = line[1]
#print(key, value)
if key not in typos:
typos[key] = list()
typos[key].append(value)
return typos
def load_typos_belinkov_bisk(file_name):
"""
Loads and returns a typos dictionary from a given file
Credit: https://github.com/ybisk/charNMT-noise/blob/master/scrambler.py
"""
    file_path = os.path.join("resources/typos", file_name)
typos = {}
for line in open(file_path):
line = line.strip().split()
typos[line[0]] = line[1:]
return typos
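# Expected layout of the typos file parsed above (one entry per line,
# whitespace-separated; the example words are illustrative):
#   word typo1 typo2 ...
# e.g. a line "because becuase becasue" maps 'because' to two candidate typos.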
def _filter_typos(typos, char_vocab):
"""
    Filters out typos that contain out-of-alphabet symbols.
"""
new_typos = dict()
for key,values in typos.items():
new_values = list()
for v in values:
invalid_chars = [c for c in v if c not in char_vocab]
if len(invalid_chars) > 0:
continue
new_values.append(v)
if len(new_values) > 0:
new_typos[key] = new_values
return new_typos
def induce_noise_typos(input_token, typos : dict, prob = 1.0):
"""
Induces a random typo into the input token with a given probability.
Credit: https://github.com/ybisk/charNMT-noise/blob/master/scrambler.py
"""
if input_token in typos and random.random() <= prob:
typos_for_token = typos[input_token]
typo_idx = random.randint(0, len(typos_for_token) - 1)
typo = typos_for_token[typo_idx]
return typo, typo != input_token
else:
return input_token, False
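# Minimal usage sketch (the file name is illustrative):
#   typos = load_typos("moe_misspellings_train.tsv")
#   noised, changed = induce_noise_typos("because", typos, prob=0.5)
# Tokens without an entry in the typos dictionary are always returned
# unchanged.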
def noise_sentences_typos(sentences, typos : dict, prob = 1.0):
"""
Induces noise on the given list of sentences using a LUT of typos.
"""
from copy import deepcopy
noised_sentences = deepcopy(sentences)
cnt_noised_tokens = 0
for sentence in noised_sentences:
for token in sentence:
token.text, noised = induce_noise_typos(token.text, typos, prob)
if noised:
cnt_noised_tokens += 1
return noised_sentences, cnt_noised_tokens
| 3,257 | 25.487805 | 84 | py |
nat-acl2020 | nat-acl2020-master/flair_ext/nn.py | import warnings
from pathlib import Path
import torch.nn
from abc import abstractmethod
from typing import Union, List
import flair
from flair.data import Sentence
from flair.training_utils import Result
from flair.nn import Model
class ParameterizedModel(Model):
"""Abstract base class for all downstream task models in Flair, such as SequenceTagger and TextClassifier.
Every new type of model must implement these methods."""
@abstractmethod
def forward_loss(self, sentences: Union[List[Sentence], Sentence], params: dict = {}) -> (torch.tensor, dict):
"""Performs a forward pass and returns a loss tensor for backpropagation. Implement this to enable training."""
pass
| 711 | 28.666667 | 119 | py |
nat-acl2020 | nat-acl2020-master/flair_ext/models/nat_sequence_tagger_model.py | import logging
import sys
import numpy as np
from pathlib import Path
import torch.nn
import torch.nn.functional as F
from torch.utils.data.dataset import Dataset
import flair.nn
import torch
import flair.embeddings
from flair.data import Dictionary, Sentence, Token, Label
from flair.datasets import DataLoader
from typing import List, Union
from enum import Enum
from flair.training_utils import clear_embeddings, Metric, Result
from flair.models import SequenceTagger
from robust_ner.enums import TrainingMode, EvalMode, MisspellingMode
from robust_ner.noise import noise_sentences
from robust_ner.embeddings import check_embeddings
from flair_ext.nn import ParameterizedModel
from tqdm import tqdm
log = logging.getLogger("flair")
def get_masked_sum(loss_unreduced, lengths):
loss_sum = 0
for batch_idx, length in enumerate(lengths):
loss_sum += loss_unreduced[batch_idx][:length].sum()
return loss_sum
def get_per_token_mean(loss_sum, lengths):
return loss_sum / float(sum(lengths))
def get_per_batch_mean(loss_sum, lengths):
return loss_sum / float(len(lengths))
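# The three helpers above reduce an unreduced, zero-padded loss tensor of
# shape (batch, max_len, ...): get_masked_sum ignores padded positions beyond
# each sentence length, and the two mean variants divide the masked sum either
# by the total number of tokens or by the number of sentences in the batch.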
class NATSequenceTagger(SequenceTagger, ParameterizedModel):
def __init__(
self,
hidden_size: int,
embeddings: flair.embeddings.TokenEmbeddings,
tag_dictionary: Dictionary,
tag_type: str,
use_crf: bool = True,
use_rnn: bool = True,
rnn_layers: int = 1,
dropout: float = 0.0,
word_dropout: float = 0.05,
locked_dropout: float = 0.5,
pickle_module: str = "pickle",
train_mode: TrainingMode = TrainingMode.Standard,
alpha: float = 1.0,
misspelling_rate: float = 0.0,
cmx_file = "",
):
super(NATSequenceTagger, self).__init__(hidden_size, embeddings, tag_dictionary, tag_type,
use_crf, use_rnn, rnn_layers, dropout, word_dropout, locked_dropout, pickle_module)
self.set_training_params(train_mode, alpha, misspelling_rate, cmx_file)
def set_training_params(self, train_mode: TrainingMode, alpha: float = 1.0, misspelling_rate: float = 0.0, cmx_file = ""):
self.train_mode = train_mode
self.alpha = alpha
self.misspelling_rate_train = misspelling_rate
self.cmx_file_train = cmx_file
if self.cmx_file_train:
self.misspell_mode = MisspellingMode.ConfusionMatrixBased
else:
self.misspell_mode = MisspellingMode.Random
def _get_state_dict(self):
model_state = super(NATSequenceTagger, self)._get_state_dict()
model_state["train_mode"] = self.train_mode
return model_state
def _init_model_with_state_dict(state):
use_dropout = 0.0 if not "use_dropout" in state.keys() else state["use_dropout"]
use_word_dropout = (
0.0 if not "use_word_dropout" in state.keys() else state["use_word_dropout"]
)
use_locked_dropout = (
0.0
if not "use_locked_dropout" in state.keys()
else state["use_locked_dropout"]
)
train_mode = TrainingMode.Standard if not "train_mode" in state.keys() else state["train_mode"]
model = NATSequenceTagger(
hidden_size=state["hidden_size"],
embeddings=state["embeddings"],
tag_dictionary=state["tag_dictionary"],
tag_type=state["tag_type"],
use_crf=state["use_crf"],
use_rnn=state["use_rnn"],
rnn_layers=state["rnn_layers"],
dropout=use_dropout,
word_dropout=use_word_dropout,
locked_dropout=use_locked_dropout,
train_mode=train_mode
)
model.load_state_dict(state["state_dict"])
return model
def evaluate(
self,
sentences: Dataset,
eval_mini_batch_size: int = 32,
embeddings_in_memory: bool = True,
out_path: Path = None,
num_workers: int = 8,
eval_mode: EvalMode = EvalMode.Standard,
misspell_mode: MisspellingMode = MisspellingMode.Random,
misspelling_rate: float = 0.0,
char_vocab: set = {},
lut: dict = {},
cmx: np.array = None,
typos: dict = {},
spell_check = None,
) -> (Result, float):
eval_params = {}
eval_params["eval_mode"] = eval_mode
eval_params["misspelling_rate"] = misspelling_rate
eval_params["misspell_mode"] = misspell_mode
eval_params["char_vocab"] = char_vocab
eval_params["lut"] = lut
eval_params["cmx"] = cmx
eval_params["typos"] = typos
eval_params["embeddings_in_memory"] = embeddings_in_memory
eval_params["spell_check"] = spell_check
with torch.no_grad():
eval_loss = 0
batch_no: int = 0
batch_loader = DataLoader(
sentences,
batch_size=eval_mini_batch_size,
shuffle=False,
num_workers=num_workers,
)
metric = Metric("Evaluation")
lines: List[str] = []
for batch in batch_loader:
batch_no += 1
with torch.no_grad():
features = self.forward(batch, eval_params)
loss = self._calculate_loss(features, batch)
tags, _ = self._obtain_labels(features, batch)
eval_loss += loss
for (sentence, sent_tags) in zip(batch, tags):
for (token, tag) in zip(sentence.tokens, sent_tags):
token: Token = token
token.add_tag_label("predicted", tag)
# append both to file for evaluation
eval_line = "{} {} {} {}\n".format(
token.text,
token.get_tag(self.tag_type).value,
tag.value,
tag.score,
)
lines.append(eval_line)
lines.append("\n")
for sentence in batch:
# make list of gold tags
gold_tags = [
(tag.tag, str(tag)) for tag in sentence.get_spans(self.tag_type)
]
# make list of predicted tags
predicted_tags = [
(tag.tag, str(tag)) for tag in sentence.get_spans("predicted")
]
# check for true positives, false positives and false negatives
for tag, prediction in predicted_tags:
if (tag, prediction) in gold_tags:
metric.add_tp(tag)
else:
metric.add_fp(tag)
for tag, gold in gold_tags:
if (tag, gold) not in predicted_tags:
metric.add_fn(tag)
else:
metric.add_tn(tag)
clear_embeddings(
batch, also_clear_word_embeddings=not embeddings_in_memory
)
eval_loss /= batch_no
if out_path is not None:
with open(out_path, "w", encoding="utf-8") as outfile:
outfile.write("".join(lines))
detailed_result = (
f"\nMICRO_AVG: acc {metric.micro_avg_accuracy()} - f1-score {metric.micro_avg_f_score()}"
f"\nMACRO_AVG: acc {metric.macro_avg_accuracy()} - f1-score {metric.macro_avg_f_score()}"
)
for class_name in metric.get_classes():
detailed_result += (
f"\n{class_name:<10} tp: {metric.get_tp(class_name)} - fp: {metric.get_fp(class_name)} - "
f"fn: {metric.get_fn(class_name)} - tn: {metric.get_tn(class_name)} - precision: "
f"{metric.precision(class_name):.4f} - recall: {metric.recall(class_name):.4f} - "
f"accuracy: {metric.accuracy(class_name):.4f} - f1-score: "
f"{metric.f_score(class_name):.4f}"
)
result = Result(
main_score=metric.micro_avg_f_score(),
log_line=f"{metric.precision()}\t{metric.recall()}\t{metric.micro_avg_f_score()}",
log_header="PRECISION\tRECALL\tF1",
detailed_results=detailed_result,
)
return result, eval_loss
def predict(
self,
sentences: Union[List[Sentence], Sentence],
eval_mode: EvalMode = EvalMode.Standard,
mini_batch_size=32,
embeddings_in_memory: bool = True,
verbose=False,
misspell_mode: MisspellingMode = MisspellingMode.Random,
misspelling_rate: float = 0.0,
char_vocab: set = {},
lut: dict = {},
cmx: np.array = None,
typos: dict = {},
spell_check = None,
) -> List[Sentence]:
predict_params = {}
predict_params["eval_mode"] = eval_mode
predict_params["misspelling_rate"] = misspelling_rate
predict_params["misspell_mode"] = misspell_mode
predict_params["char_vocab"] = char_vocab
predict_params["lut"] = lut
predict_params["cmx"] = cmx
predict_params["typos"] = typos
predict_params["embeddings_in_memory"] = embeddings_in_memory
predict_params["spell_check"] = spell_check
with torch.no_grad():
if isinstance(sentences, Sentence):
sentences = [sentences]
filtered_sentences = self._filter_empty_sentences(sentences)
# remove previous embeddings
clear_embeddings(filtered_sentences, also_clear_word_embeddings=True)
            # reverse sort all sequences by their length
filtered_sentences.sort(key=lambda x: len(x), reverse=True)
# make mini-batches
batches = [
filtered_sentences[x : x + mini_batch_size]
for x in range(0, len(filtered_sentences), mini_batch_size)
]
# progress bar for verbosity
if verbose:
batches = tqdm(batches)
for i, batch in enumerate(batches):
if verbose:
batches.set_description(f"Inferencing on batch {i}")
with torch.no_grad():
feature = self.forward(batch, predict_params)
tags, all_tags = self._obtain_labels(feature, batch)
for (sentence, sent_tags, sent_all_tags) in zip(batch, tags, all_tags):
for (token, tag, token_all_tags) in zip(
sentence.tokens, sent_tags, sent_all_tags
):
token.add_tag_label(self.tag_type, tag)
token.add_tags_proba_dist(self.tag_type, token_all_tags)
# clearing token embeddings to save memory
clear_embeddings(batch, also_clear_word_embeddings=True)
return sentences
def forward_loss(
self, sentences: Union[List[Sentence], Sentence], params: dict = {}
) -> (torch.tensor, dict):
verbose = params.get("verbose", False)
char_vocab = params.get("char_vocab", {})
cmx = params.get("cmx", {})
lut = params.get("lut", {})
embeddings_in_memory = params.get("embeddings_in_memory", True)
auxilary_losses = {}
alpha = self.alpha
misspelling_rate_train = self.misspelling_rate_train
self.zero_grad()
if self.train_mode == TrainingMode.Standard:
loss = self._forward_loss_standard(sentences)
elif self.train_mode == TrainingMode.Stability:
loss, auxilary_losses = self._forward_loss_stability(sentences, alpha=alpha,
misspelling_rate=misspelling_rate_train, embeddings_in_memory=embeddings_in_memory,
char_vocab=char_vocab, cmx=cmx, lut=lut, verbose=verbose)
elif self.train_mode == TrainingMode.Augmentation:
loss, auxilary_losses = self._forward_loss_data_augmentation(sentences, alpha=alpha,
misspelling_rate=misspelling_rate_train, cmx=cmx, lut=lut,
embeddings_in_memory=embeddings_in_memory, char_vocab=char_vocab, verbose=verbose)
else:
raise Exception("Training mode '{}' is not supported!".format(self.train_mode))
return loss, auxilary_losses
def _forward_loss_standard(
self, sentences: Union[List[Sentence], Sentence]
) -> torch.tensor:
features = self._forward_standard(sentences)
return self._calculate_loss(features, sentences)
def _forward_loss_data_augmentation(
self, sentences: Union[List[Sentence], Sentence], alpha: float, misspelling_rate: float,
char_vocab: dict, lut: dict = {}, cmx: np.array = None, embeddings_in_memory: bool = True, verbose: bool = False
) -> (torch.tensor, dict):
"""
Data augmentation objective. Returns the auxiliary loss as the sum of standard objectives calculated on the
original and the perturbed samples.
"""
misspelled_sentences, _ = noise_sentences(sentences, self.misspell_mode, misspelling_rate, char_vocab, cmx, lut, {}, verbose)
clear_embeddings(misspelled_sentences, also_clear_word_embeddings=True)
embeddings, lengths = self._embed_sentences(sentences)
embeddings_misspell, lengths_misspell = self._embed_sentences(misspelled_sentences)
if not check_embeddings(sentences, misspelled_sentences, embeddings, embeddings_misspell):
log.warning("WARNING: embedding of the misspelled text may be invalid!")
outputs_base, _ = self._forward(embeddings, lengths)
outputs_misspell, _ = self._forward(embeddings_misspell, lengths_misspell)
loss_base = self._calculate_loss(outputs_base, sentences)
loss_misspell = alpha * self._calculate_loss(outputs_misspell, misspelled_sentences)
auxilary_losses = { 'loss_base': loss_base, 'loss_misspell': loss_misspell }
return (loss_base + loss_misspell), auxilary_losses
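    # The combined objective above is L(x) + alpha * L(x'): the standard loss
    # on the clean batch plus the weighted standard loss on its perturbed copy.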
def _forward_loss_stability(
self, sentences: Union[List[Sentence], Sentence], alpha: float, misspelling_rate: float, char_vocab: dict,
lut: dict = {}, cmx: np.array = None, embeddings_in_memory: bool = True, verbose: bool = False
) -> (torch.tensor, dict):
"""
stability objective for classification -> KL divergence (see Zheng 2016 Eq.10)
L_stab(x,x') = -sum_j(P(yj|x)*log(P(yj|x')))
The output loss is the sum of the standard loss and the similarity objective.
"""
misspelled_sentences, _ = noise_sentences(sentences, self.misspell_mode, misspelling_rate, char_vocab, cmx, lut, {}, verbose)
clear_embeddings(misspelled_sentences, also_clear_word_embeddings=True)
embeddings, lengths = self._embed_sentences(sentences)
embeddings_misspell, lengths_misspell = self._embed_sentences(misspelled_sentences)
if not check_embeddings(sentences, misspelled_sentences, embeddings, embeddings_misspell):
log.warning("WARNING: embedding of the misspelled text may be invalid!")
outputs_base, features_base = self._forward(embeddings, lengths)
outputs_misspell, features_misspell = self._forward(embeddings_misspell, lengths_misspell)
loss_base = self._calculate_loss(outputs_base, sentences)
target_distrib = F.softmax(outputs_base, dim=2).transpose(1, 2).detach()
input_log_distrib = F.log_softmax(outputs_misspell, dim=2).transpose(1, 2)
loss_stability = alpha * F.kl_div(input_log_distrib, target_distrib, reduction='none').transpose(2, 1)
loss_sum = get_masked_sum(loss_stability, lengths)
loss_mean = get_per_batch_mean(loss_sum, lengths)
# log.info(f"loss_base: {loss_base.item():.4f} loss_stability: {loss_mean.item():.4f}")
auxilary_losses = { 'loss_base': loss_base, 'loss_kldiv': loss_mean }
return (loss_base + loss_mean), auxilary_losses
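    # Note on the KL term above: with target p = softmax(base outputs) and
    # input log q = log_softmax(perturbed outputs), F.kl_div computes
    # p * (log p - log q) elementwise. This differs from the docstring's
    # -sum_j p_j * log q_j only by the entropy term sum_j p_j * log p_j, which
    # is constant w.r.t. the perturbed branch since the target is detached.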
def _embed_sentences(self, sentences: List[Sentence]) -> (torch.tensor, List[int]):
self.embeddings.embed(sentences)
sentences.sort(key=lambda x: len(x), reverse=True)
lengths: List[int] = [len(sentence.tokens) for sentence in sentences]
tag_list: List = []
longest_token_sequence_in_batch: int = lengths[0]
# initialize zero-padded word embeddings tensor
embeddings = torch.zeros(
[
len(sentences),
longest_token_sequence_in_batch,
self.embeddings.embedding_length,
],
dtype=torch.float,
device=flair.device,
)
for s_id, sentence in enumerate(sentences):
# fill values with word embeddings
embeddings[s_id][: len(sentence)] = torch.cat(
[token.get_embedding().unsqueeze(0) for token in sentence], 0
)
# get the tags in this sentence
tag_idx: List[int] = [
self.tag_dictionary.get_idx_for_item(token.get_tag(self.tag_type).value)
for token in sentence
]
# add tags as tensor
tag = torch.LongTensor(tag_idx).to(flair.device)
tag_list.append(tag)
return embeddings, lengths
def _forward(self, embeddings: torch.tensor, lengths: List[int]):
encoder_features = embeddings.transpose(0, 1)
# --------------------------------------------------------------------
# FF PART
# --------------------------------------------------------------------
if self.use_dropout > 0.0:
encoder_features = self.dropout(encoder_features)
if self.use_word_dropout > 0.0:
encoder_features = self.word_dropout(encoder_features)
if self.use_locked_dropout > 0.0:
encoder_features = self.locked_dropout(encoder_features)
if self.relearn_embeddings:
encoder_features = self.embedding2nn(encoder_features)
if self.use_rnn:
packed = torch.nn.utils.rnn.pack_padded_sequence(encoder_features, lengths)
rnn_output, hidden = self.rnn(packed)
decoder_features, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(
rnn_output
)
if self.use_dropout > 0.0:
decoder_features = self.dropout(decoder_features)
# word dropout only before LSTM - TODO: more experimentation needed
# if self.use_word_dropout > 0.0:
# decoder_features = self.word_dropout(decoder_features)
if self.use_locked_dropout > 0.0:
decoder_features = self.locked_dropout(decoder_features)
outputs = self.linear(decoder_features)
return outputs.transpose(0, 1), decoder_features.transpose(0, 1)
def forward(self, sentences: List[Sentence], params: dict = {}):
verbose = params.get("verbose", False)
        eval_mode = params.get("eval_mode", EvalMode.Standard)
misspell_mode = params.get("misspell_mode", MisspellingMode.Random)
misspelling_rate = params.get("misspelling_rate", 0.0)
char_vocab = params.get("char_vocab", {})
lut = params.get("lut", {})
cmx = params.get("cmx", {})
typos = params.get("typos", {})
spell_check = params.get("spell_check", None)
self.zero_grad()
if eval_mode is EvalMode.Standard:
outputs = self._forward_standard(sentences, spell_check)
elif eval_mode is EvalMode.Misspellings:
outputs = self._forward_misspelled(sentences, misspelling_rate=misspelling_rate, misspell_mode=misspell_mode,
char_vocab=char_vocab, lut=lut, cmx=cmx, typos=typos, spell_check=spell_check, verbose=verbose)
else:
raise Exception("Evaluation mode '{}' is not supported!".format(eval_mode))
return outputs
def _forward_standard(self, sentences: List[Sentence], spell_check = None):
# self.zero_grad()
if spell_check != None:
from robust_ner.spellcheck import correct_sentences
corrected_sentences = correct_sentences(spell_check, sentences)
clear_embeddings(corrected_sentences, also_clear_word_embeddings=True)
embeddings, lengths = self._embed_sentences(corrected_sentences)
else:
embeddings, lengths = self._embed_sentences(sentences)
outputs, _ = self._forward(embeddings, lengths)
return outputs
def _forward_misspelled(
self, sentences: Union[List[Sentence], Sentence], misspelling_rate: float, misspell_mode: MisspellingMode, char_vocab: set,
cmx: np.array, lut: dict, typos:dict, spell_check = None, verbose: bool = False
) -> (torch.tensor, dict):
misspelled_sentences, _ = noise_sentences(sentences, misspell_mode, misspelling_rate, char_vocab, cmx, lut, typos, verbose)
clear_embeddings(misspelled_sentences, also_clear_word_embeddings=True)
outputs_misspell = self._forward_standard(misspelled_sentences, spell_check)
return outputs_misspell
| 21,916 | 39.362799 | 133 | py |
nat-acl2020 | nat-acl2020-master/flair_ext/models/__init__.py | from .nat_sequence_tagger_model import NATSequenceTagger
| 57 | 28 | 56 | py |
nat-acl2020 | nat-acl2020-master/flair_ext/visual/training_curves.py | import logging
from collections import defaultdict
from pathlib import Path
from typing import Union, List
import numpy as np
import csv
import matplotlib
import math
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# header for 'weights.txt'
WEIGHT_NAME = 1
WEIGHT_NUMBER = 2
WEIGHT_VALUE = 3
log = logging.getLogger("flair")
class Plotter(object):
"""
Plots training parameters (loss, f-score, and accuracy) and training weights over time.
Input files are the output files 'loss.tsv' and 'weights.txt' from training either a sequence tagger or text
classification model.
"""
@staticmethod
def _extract_evaluation_data(file_name: Path, score: str = "F1") -> dict:
training_curves = {
"train": {"loss": [], "score": []},
"test": {"loss": [], "score": []},
"dev": {"loss": [], "score": []},
}
with open(file_name, "r") as tsvin:
tsvin = csv.reader(tsvin, delimiter="\t")
# determine the column index of loss, f-score and accuracy for train, dev and test split
row = next(tsvin, None)
score = score.upper()
if f"TEST_{score}" not in row:
log.warning("-" * 100)
log.warning(f"WARNING: No {score} found for test split in this data.")
log.warning(
f"Are you sure you want to plot {score} and not another value?"
)
log.warning("-" * 100)
TRAIN_SCORE = (
row.index(f"TRAIN_{score}") if f"TRAIN_{score}" in row else None
)
DEV_SCORE = row.index(f"DEV_{score}") if f"DEV_{score}" in row else None
TEST_SCORE = row.index(f"TEST_{score}") if f"TEST_{score}" in row else None
# then get all relevant values from the tsv
for row in tsvin:
if TRAIN_SCORE is not None:
if row[TRAIN_SCORE] != "_":
training_curves["train"]["score"].append(
float(row[TRAIN_SCORE])
)
if DEV_SCORE is not None and row[DEV_SCORE] != "_":
training_curves["dev"]["score"].append(float(row[DEV_SCORE]))
if TEST_SCORE is not None and row[TEST_SCORE] != "_":
training_curves["test"]["score"].append(float(row[TEST_SCORE]))
return training_curves
@staticmethod
def _extract_weight_data(file_name: Path) -> dict:
weights = defaultdict(lambda: defaultdict(lambda: list()))
with open(file_name, "r") as tsvin:
tsvin = csv.reader(tsvin, delimiter="\t")
for row in tsvin:
name = row[WEIGHT_NAME]
param = row[WEIGHT_NUMBER]
value = float(row[WEIGHT_VALUE])
weights[name][param].append(value)
return weights
@staticmethod
def _extract_learning_rate(file_name: Path):
lrs = []
losses = []
with open(file_name, "r") as tsvin:
tsvin = csv.reader(tsvin, delimiter="\t")
row = next(tsvin, None)
LEARNING_RATE = row.index("LEARNING_RATE")
TRAIN_LOSS = row.index("TRAIN_LOSS")
# then get all relevant values from the tsv
for row in tsvin:
if row[TRAIN_LOSS] != "_":
losses.append(float(row[TRAIN_LOSS]))
if row[LEARNING_RATE] != "_":
lrs.append(float(row[LEARNING_RATE]))
return lrs, losses
def plot_weights(self, file_name: Union[str, Path]):
if type(file_name) is str:
file_name = Path(file_name)
weights = self._extract_weight_data(file_name)
total = len(weights)
columns = 2
rows = max(2, int(math.ceil(total / columns)))
figsize = (5, 5)
if rows != columns:
figsize = (5, rows + 5)
        # create the subplot grid directly; a bare plt.figure() call here would
        # create an extra empty figure that is never closed
        f, axarr = plt.subplots(rows, columns, figsize=figsize)
c = 0
r = 0
for name, values in weights.items():
# plot i
axarr[r, c].set_title(name, fontsize=6)
for _, v in values.items():
axarr[r, c].plot(np.arange(0, len(v)), v, linewidth=0.35)
axarr[r, c].set_yticks([])
axarr[r, c].set_xticks([])
c += 1
if c == columns:
c = 0
r += 1
while r != rows and c != columns:
axarr[r, c].set_yticks([])
axarr[r, c].set_xticks([])
c += 1
if c == columns:
c = 0
r += 1
# save plots
f.subplots_adjust(hspace=0.5)
plt.tight_layout(pad=1.0)
path = file_name.parent / "weights.png"
plt.savefig(path, dpi=300)
        plt.close(f)
def plot_training_curves(
self, file_name: Union[str, Path], plot_values: List[str] = ["loss", "F1"]
):
if type(file_name) is str:
file_name = Path(file_name)
fig = plt.figure(figsize=(15, 10))
for plot_no, plot_value in enumerate(plot_values):
training_curves = self._extract_evaluation_data(file_name, plot_value)
plt.subplot(len(plot_values), 1, plot_no + 1)
if training_curves["train"]["score"]:
x = np.arange(0, len(training_curves["train"]["score"]))
plt.plot(
x, training_curves["train"]["score"], label=f"training {plot_value}"
)
if training_curves["dev"]["score"]:
x = np.arange(0, len(training_curves["dev"]["score"]))
plt.plot(
x, training_curves["dev"]["score"], label=f"validation {plot_value}"
)
if training_curves["test"]["score"]:
x = np.arange(0, len(training_curves["test"]["score"]))
plt.plot(
x, training_curves["test"]["score"], label=f"test {plot_value}"
)
plt.legend(bbox_to_anchor=(1.04, 0), loc="lower left", borderaxespad=0)
plt.ylabel(plot_value)
plt.xlabel("epochs")
# save plots
plt.tight_layout(pad=1.0)
path = file_name.parent / "training.png"
plt.savefig(path, dpi=300)
plt.close(fig)
def plot_learning_rate(
self, file_name: Union[str, Path], skip_first: int = 10, skip_last: int = 5
):
if type(file_name) is str:
file_name = Path(file_name)
lrs, losses = self._extract_learning_rate(file_name)
lrs = lrs[skip_first:-skip_last] if skip_last > 0 else lrs[skip_first:]
losses = losses[skip_first:-skip_last] if skip_last > 0 else losses[skip_first:]
fig, ax = plt.subplots(1, 1)
ax.plot(lrs, losses)
ax.set_ylabel("Loss")
ax.set_xlabel("Learning Rate")
ax.set_xscale("log")
ax.xaxis.set_major_formatter(plt.FormatStrFormatter("%.0e"))
# save plot
plt.tight_layout(pad=1.0)
path = file_name.parent / "learning_rate.png"
plt.savefig(path, dpi=300)
plt.close(fig)
| 7,271 | 31.609865 | 112 | py |
nat-acl2020 | nat-acl2020-master/flair_ext/trainers/__init__.py | from .trainer import ParameterizedModelTrainer
| 47 | 23 | 46 | py |
nat-acl2020 | nat-acl2020-master/flair_ext/trainers/trainer.py | from pathlib import Path
from typing import List, Union
import datetime
from torch.optim.sgd import SGD
from torch.utils.data.dataset import ConcatDataset
import flair
import flair.nn
from flair.data import Sentence, MultiCorpus, Corpus
from flair.datasets import DataLoader
from flair.training_utils import (
init_output_file,
WeightExtractor,
clear_embeddings,
EvaluationMetric,
log_line,
add_file_handler,
Result,
)
from flair.optim import *
from flair.trainers import ModelTrainer
from robust_ner.noise import (
make_char_vocab,
)
from robust_ner.confusion_matrix import (
load_confusion_matrix,
filter_cmx,
make_vocab_from_lut,
)
from robust_ner.enums import (
TrainingMode,
MisspellingMode,
EvalMode,
)
log = logging.getLogger("flair")
class ParameterizedModelTrainer(ModelTrainer):
def __init__(
self,
model: flair.nn.Model,
corpus: Corpus,
optimizer: Optimizer = SGD,
epoch: int = 0,
loss: float = 10000.0,
optimizer_state: dict = None,
scheduler_state: dict = None,
):
super(ParameterizedModelTrainer, self).__init__(model, corpus, optimizer, epoch, loss, optimizer_state, scheduler_state)
def train(
self,
base_path: Union[Path, str],
evaluation_metric: EvaluationMetric = EvaluationMetric.MICRO_F1_SCORE,
learning_rate: float = 0.1,
mini_batch_size: int = 32,
eval_mini_batch_size: int = None,
max_epochs: int = 100,
anneal_factor: float = 0.5,
patience: int = 3,
train_with_dev: bool = False,
monitor_train: bool = False,
embeddings_in_memory: bool = True,
checkpoint: bool = False,
save_final_model: bool = True,
anneal_with_restarts: bool = False,
shuffle: bool = True,
param_selection_mode: bool = False,
num_workers: int = 8,
valid_with_misspellings: bool = True,
**kwargs,
) -> dict:
if eval_mini_batch_size is None:
eval_mini_batch_size = mini_batch_size
# cast string to Path
if type(base_path) is str:
base_path = Path(base_path)
log_handler = add_file_handler(log, base_path / "training.log")
log_line(log)
log.info(f'Model: "{self.model}"')
log_line(log)
log.info(f'Corpus: "{self.corpus}"')
log_line(log)
log.info("Parameters:")
log.info(f' - learning_rate: "{learning_rate}"')
log.info(f' - mini_batch_size: "{mini_batch_size}"')
log.info(f' - patience: "{patience}"')
log.info(f' - anneal_factor: "{anneal_factor}"')
log.info(f' - max_epochs: "{max_epochs}"')
log.info(f' - shuffle: "{shuffle}"')
log.info(f' - train_with_dev: "{train_with_dev}"')
log.info(f' - valid_with_misspellings: "{valid_with_misspellings}"')
log.info("Model:")
log.info(f' - hidden_size: "{self.model.hidden_size}"')
log.info(f' - train_mode: "{self.model.train_mode}"')
log.info(f' - alpha: "{self.model.alpha}"')
log.info(f' - misspell_mode: "{self.model.misspell_mode}"')
log.info(f' - misspelling_rate: "{self.model.misspelling_rate_train}"')
log.info(f' - cmx_file: "{self.model.cmx_file_train}"')
log_line(log)
log.info(f'Model training base path: "{base_path}"')
log_line(log)
log.info(f"Evaluation method: {evaluation_metric.name}")
# determine what splits (train, dev, test) to evaluate and log
log_train = True if monitor_train else False
log_test = True if (not param_selection_mode and self.corpus.test) else False
log_dev = True if not train_with_dev else False
log_test = not log_dev
eval_misspelling_rate = 0.05
log_suffix = lambda prefix, rate, cm, mode: f"{prefix} (misspell: cmx={cm})" if mode == MisspellingMode.ConfusionMatrixBased else f"{prefix} (misspell: rate={rate})"
loss_txt = init_output_file(base_path, "loss.tsv")
with open(loss_txt, "a") as f:
f.write(f"EPOCH\tTIMESTAMP\tBAD_EPOCHS\tLEARNING_RATE\tTRAIN_LOSS")
dummy_result, _ = self.model.evaluate(
[Sentence("d", labels=["0.1"])],
eval_mini_batch_size,
embeddings_in_memory,
)
if log_train:
f.write(
"\tTRAIN_" + "\tTRAIN_".join(dummy_result.log_header.split("\t"))
)
if log_dev:
f.write(
"\tDEV_LOSS\tDEV_"
+ "\tDEV_".join(dummy_result.log_header.split("\t"))
)
if valid_with_misspellings:
suffix=log_suffix('DEV', eval_misspelling_rate, self.model.cmx_file_train, self.model.misspell_mode)
f.write(
                        f"\t{suffix}" + f"_LOSS\t{suffix}_" + f"\t{suffix}_".join(dummy_result.log_header.split("\t"))
)
if log_test:
f.write(
"\tTEST_LOSS\tTEST_"
+ "\tTEST_".join(dummy_result.log_header.split("\t"))
)
if valid_with_misspellings:
suffix=log_suffix('TEST', eval_misspelling_rate, self.model.cmx_file_train, self.model.misspell_mode)
f.write(
                        f"\t{suffix}" + f"_LOSS\t{suffix}_" + f"\t{suffix}_".join(dummy_result.log_header.split("\t"))
)
weight_extractor = WeightExtractor(base_path)
optimizer = self.optimizer(self.model.parameters(), lr=learning_rate, **kwargs)
if self.optimizer_state is not None:
optimizer.load_state_dict(self.optimizer_state)
# minimize training loss if training with dev data, else maximize dev score
anneal_mode = "min" if train_with_dev else "max"
if isinstance(optimizer, (AdamW, SGDW)):
scheduler = ReduceLRWDOnPlateau(
optimizer,
factor=anneal_factor,
patience=patience,
mode=anneal_mode,
verbose=True,
)
else:
scheduler = ReduceLROnPlateau(
optimizer,
factor=anneal_factor,
patience=patience,
mode=anneal_mode,
verbose=True,
)
if self.scheduler_state is not None:
scheduler.load_state_dict(self.scheduler_state)
train_data = self.corpus.train
# if training also uses dev data, include in training set
if train_with_dev:
train_data = ConcatDataset([self.corpus.train, self.corpus.dev])
dev_clean_score_history = []
dev_noisy_score_history = []
dev_clean_loss_history = []
dev_noisy_loss_history = []
train_loss_history = []
complete_data = ConcatDataset([self.corpus.train, self.corpus.dev, self.corpus.test])
char_vocab = make_char_vocab(complete_data)
log.info(f"Vocabulary of the corpus (#{len(char_vocab)}): {char_vocab}")
if self.model.misspell_mode == MisspellingMode.ConfusionMatrixBased:
cmx, lut = load_confusion_matrix(self.model.cmx_file_train)
cmx, lut = filter_cmx(cmx, lut, char_vocab)
else:
cmx, lut = None, {}
loss_params = {}
loss_params["verbose"] = False
loss_params["char_vocab"] = char_vocab
loss_params["cmx"] = cmx
loss_params["lut"] = lut
loss_params["embeddings_in_memory"] = embeddings_in_memory
# At any point you can hit Ctrl + C to break out of training early.
try:
previous_learning_rate = learning_rate
for epoch in range(0 + self.epoch, max_epochs + self.epoch):
log_line(log)
try:
bad_epochs = scheduler.num_bad_epochs
except:
bad_epochs = 0
for group in optimizer.param_groups:
learning_rate = group["lr"]
# reload last best model if annealing with restarts is enabled
if (
learning_rate != previous_learning_rate
and anneal_with_restarts
and (base_path / "best-model.pt").exists()
):
log.info("resetting to best model")
self.model.load(base_path / "best-model.pt")
previous_learning_rate = learning_rate
# stop training if learning rate becomes too small
if learning_rate < 0.0001:
log_line(log)
log.info("learning rate too small - quitting training!")
log_line(log)
break
batch_loader = DataLoader(
train_data,
batch_size=mini_batch_size,
shuffle=shuffle,
num_workers=num_workers,
)
self.model.train()
train_loss: float = 0
train_auxilary_losses = {}
seen_batches = 0
total_number_of_batches = len(batch_loader)
modulo = max(1, int(total_number_of_batches / 10))
for batch_no, batch in enumerate(batch_loader):
loss, auxilary_losses = self.model.forward_loss(batch, params=loss_params)
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5.0)
optimizer.step()
seen_batches += 1
train_loss += loss.item()
for k,v in auxilary_losses.items():
train_auxilary_losses[k] = train_auxilary_losses.get(k, 0) + v
clear_embeddings(
batch, also_clear_word_embeddings=not embeddings_in_memory
)
if batch_no % modulo == 0:
msg = f"epoch {epoch + 1} - iter {batch_no}/{total_number_of_batches} - loss {train_loss / seen_batches:.6f}"
# note: this is the loss accumulated in the current epoch divided by the number of already seen batches
if len(train_auxilary_losses) > 0:
aux_losses_str = " ".join([f"{key}={value / seen_batches:.6f}" for (key, value) in train_auxilary_losses.items()])
msg += f" ({aux_losses_str})"
log.info(msg)
iteration = epoch * total_number_of_batches + batch_no
if not param_selection_mode:
weight_extractor.extract_weights(
self.model.state_dict(), iteration
)
train_loss /= seen_batches
for k,v in auxilary_losses.items():
train_auxilary_losses[k] /= seen_batches
self.model.eval()
log_line(log)
log.info(
f"EPOCH {epoch + 1} done: loss {train_loss:.6f} - lr {learning_rate:.4f} - bad epochs {bad_epochs}"
)
# anneal against train loss if training with dev, otherwise anneal against dev score
current_score = train_loss
with open(loss_txt, "a") as f:
f.write(
f"\n{epoch}\t{datetime.datetime.now():%H:%M:%S}\t{bad_epochs}\t{learning_rate:.4f}\t{train_loss}"
)
if log_train:
train_eval_result, train_loss = self.model.evaluate(
self.corpus.train,
eval_mini_batch_size,
embeddings_in_memory,
num_workers=num_workers,
)
f.write(f"\t{train_eval_result.log_line}")
if log_dev:
dev_eval_result_clean, dev_loss_clean = self.model.evaluate(
self.corpus.dev,
eval_mini_batch_size,
embeddings_in_memory,
num_workers=num_workers,
)
f.write(f"\t{dev_loss_clean}\t{dev_eval_result_clean.log_line}")
log.info(
f"DEV : loss {dev_loss_clean:.6f} - score {dev_eval_result_clean.main_score:.4f}"
)
# calculate scores using dev data if available
# append dev score to score history
dev_clean_score_history.append(dev_eval_result_clean.main_score)
dev_clean_loss_history.append(dev_loss_clean)
if valid_with_misspellings:
# evaluate on misspellings
dev_eval_result_noisy, dev_loss_noisy = self.model.evaluate(
self.corpus.dev,
eval_mini_batch_size,
embeddings_in_memory,
num_workers=num_workers,
eval_mode=EvalMode.Misspellings,
misspell_mode=self.model.misspell_mode,
char_vocab=char_vocab,
cmx=cmx,
lut=lut,
misspelling_rate=eval_misspelling_rate,
)
f.write(f"\t{dev_loss_noisy}\t{dev_eval_result_noisy.log_line}")
log.info(
f"{log_suffix('DEV', eval_misspelling_rate, self.model.cmx_file_train, self.model.misspell_mode)}"
+ f" : loss {dev_loss_noisy:.6f} - score {dev_eval_result_noisy.main_score:.4f}"
)
# calculate scores using dev data if available
# append dev score to score history
dev_noisy_score_history.append(dev_eval_result_noisy)
dev_noisy_loss_history.append(dev_loss_noisy)
current_score = (dev_eval_result_clean.main_score + dev_eval_result_noisy.main_score) / 2.0
else:
current_score = dev_eval_result_clean.main_score
if log_test:
test_eval_result_clean, test_loss_clean = self.model.evaluate(
self.corpus.test,
eval_mini_batch_size,
embeddings_in_memory,
base_path / f"test.tsv",
num_workers=num_workers,
)
f.write(f"\t{test_loss_clean}\t{test_eval_result_clean.log_line}")
log.info(
f"TEST : loss {test_loss_clean:.6f} - score {test_eval_result_clean.main_score:.4f}"
)
if valid_with_misspellings:
# evaluate on misspellings
test_eval_result_noisy, test_loss_noisy = self.model.evaluate(
self.corpus.test,
eval_mini_batch_size,
embeddings_in_memory,
base_path / f"test.tsv",
num_workers=num_workers,
eval_mode=EvalMode.Misspellings,
misspell_mode=self.model.misspell_mode,
char_vocab=char_vocab,
cmx=cmx,
lut=lut,
misspelling_rate=eval_misspelling_rate,
)
f.write(f"\t{test_loss_noisy}\t{test_eval_result_noisy.log_line}")
log.info(
f"{log_suffix('TEST', eval_misspelling_rate, self.model.cmx_file_train, self.model.misspell_mode)}"
+ f" : loss {test_loss_noisy:.6f} - score {test_eval_result_noisy.main_score:.4f}"
#f"TEST (misspell, rate={eval_misspelling_rate}) : loss {test_loss_noisy:.6f} - score {test_eval_result_noisy.main_score:.4f}"
)
scheduler.step(current_score)
train_loss_history.append(train_loss)
# if checkpoint is enable, save model at each epoch
if checkpoint and not param_selection_mode:
self.model.save_checkpoint(
base_path / "checkpoint.pt",
optimizer.state_dict(),
scheduler.state_dict(),
epoch + 1,
train_loss,
)
# if we use dev data, remember best model based on dev evaluation score
if (
not train_with_dev
and not param_selection_mode
and current_score == scheduler.best
):
log.info("'best-model.pt' saved.")
self.model.save(base_path / "best-model.pt")
# if we do not use dev data for model selection, save final model
if save_final_model and not param_selection_mode:
self.model.save(base_path / "final-model.pt")
except KeyboardInterrupt:
log_line(log)
log.info("Exiting from training early.")
if not param_selection_mode:
log.info("Saving model ...")
self.model.save(base_path / "final-model.pt")
log.info("Done.")
# test best model if test data is present
if self.corpus.test:
final_score_clean = self.final_test(
base_path,
embeddings_in_memory,
evaluation_metric,
eval_mini_batch_size,
num_workers,
)
final_score_noisy = self.final_test(
base_path,
embeddings_in_memory,
evaluation_metric,
eval_mini_batch_size,
num_workers,
eval_mode=EvalMode.Misspellings,
misspell_mode=self.model.misspell_mode,
misspelling_rate=eval_misspelling_rate,
char_vocab=char_vocab,
cmx=cmx,
lut=lut,
)
else:
final_score_clean, final_score_noisy = 0, 0
            log.info("Test data not provided, setting final score to 0")
log.removeHandler(log_handler)
return {
"test_score_clean": final_score_clean,
"test_score_noisy": final_score_noisy,
"dev_clean_score_history": dev_clean_score_history,
"dev_noisy_score_history": dev_noisy_score_history,
"train_loss_history": train_loss_history,
"dev_clean_loss_history": dev_clean_loss_history,
"dev_noisy_loss_history": dev_noisy_loss_history,
}
def final_test(
self,
base_path: Path,
embeddings_in_memory: bool,
evaluation_metric: EvaluationMetric,
eval_mini_batch_size: int,
num_workers: int = 8,
eval_mode: EvalMode = EvalMode.Standard,
misspell_mode: MisspellingMode = MisspellingMode.Random,
misspelling_rate: float = 0.0,
char_vocab: set = {},
cmx = None,
lut = {},
):
log_line(log)
log.info("Testing using best model ...")
self.model.eval()
if (base_path / "best-model.pt").exists():
self.model = self.model.load(base_path / "best-model.pt")
test_results, test_loss = self.model.evaluate(
self.corpus.test,
eval_mini_batch_size=eval_mini_batch_size,
embeddings_in_memory=embeddings_in_memory,
out_path=base_path / "test.tsv",
num_workers=num_workers,
eval_mode=eval_mode,
misspell_mode=misspell_mode,
misspelling_rate=misspelling_rate,
char_vocab=char_vocab,
cmx=cmx,
lut=lut,
)
test_results: Result = test_results
log.info(test_results.log_line)
log.info(test_results.detailed_results)
log_line(log)
# if we are training over multiple datasets, do evaluation for each
if type(self.corpus) is MultiCorpus:
for subcorpus in self.corpus.corpora:
log_line(log)
self.model.evaluate(
subcorpus.test,
eval_mini_batch_size,
embeddings_in_memory,
base_path / f"{subcorpus.name}-test.tsv",
eval_mode=eval_mode,
misspelling_rate=misspelling_rate,
char_vocab=char_vocab,
)
# get and return the final test score of best model
final_score = test_results.main_score
return final_score
@classmethod
def load_from_checkpoint(
cls, checkpoint, corpus: Corpus, optimizer: Optimizer = SGD
):
return ParameterizedModelTrainer(
checkpoint["model"],
corpus,
optimizer,
epoch=checkpoint["epoch"],
loss=checkpoint["loss"],
optimizer_state=checkpoint["optimizer_state_dict"],
scheduler_state=checkpoint["scheduler_state_dict"],
) | 22,609 | 39.30303 | 173 | py |
KoG2P | KoG2P-master/g2p.py | # -*- coding: utf-8 -*-
'''
g2p.py
~~~~~~~~~~
This script converts Korean graphemes to romanized phones and then to pronunciation.
(1) graph2phone: convert Korean graphemes to romanized phones
(2) phone2prono: convert romanized phones to pronunciation
(3) graph2prono: convert Korean graphemes to pronunciation
Usage: $ python g2p.py '스물 여덟째 사람'
(NB. Please check 'rulebook_path' before usage.)
Yejin Cho (ycho@utexas.edu)
Jaegu Kang (jaekoo.jk@gmail.com)
Hyungwon Yang (hyung8758@gmail.com)
Yeonjung Hong (yvonne.yj.hong@gmail.com)
Created: 2016-08-11
Last updated: 2019-01-31 Yejin Cho
* Key updates made:
- Executable in both Python 2 and 3.
- G2P Performance test available ($ python g2p.py test)
- G2P verbosity control available
'''
import datetime as dt
import re
import math
import sys
import optparse
# Option
parser = optparse.OptionParser()
parser.add_option("-v", action="store_true", dest="verbose", default=False,
                  help="This option prints detailed information about the g2p process.")
(options,args) = parser.parse_args()
verbose = options.verbose
# Check Python version
ver_info = sys.version_info
if ver_info[0] == 2:
reload(sys)
sys.setdefaultencoding('utf-8')
def readfileUTF8(fname):
f = open(fname, 'r')
corpus = []
while True:
line = f.readline()
line = line.encode("utf-8")
line = re.sub(u'\n', u'', line)
if line != u'':
corpus.append(line)
if not line: break
f.close()
return corpus
def writefile(body, fname):
out = open(fname, 'w')
for line in body:
out.write('{}\n'.format(line))
out.close()
def readRules(pver, rule_book):
if pver == 2:
f = open(rule_book, 'r')
elif pver == 3:
f = open(rule_book, 'r',encoding="utf-8")
rule_in = []
rule_out = []
while True:
line = f.readline()
if pver == 2:
line = unicode(line.encode("utf-8"))
line = re.sub(u'\n', u'', line)
elif pver == 3:
line = re.sub('\n', '', line)
if line != u'':
if line[0] != u'#':
IOlist = line.split('\t')
rule_in.append(IOlist[0])
if IOlist[1]:
rule_out.append(IOlist[1])
else: # If output is empty (i.e. deletion rule)
rule_out.append(u'')
if not line: break
f.close()
return rule_in, rule_out
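# Expected rulebook layout parsed above: one tab-separated "input<TAB>output"
# pair per line; lines starting with '#' are comments, and an empty output
# field encodes a deletion rule.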
def isHangul(charint):
hangul_init = 44032
hangul_fin = 55203
return charint >= hangul_init and charint <= hangul_fin
def checkCharType(var_list):
# 1: whitespace
# 0: hangul
# -1: non-hangul
checked = []
for i in range(len(var_list)):
if var_list[i] == 32: # whitespace
checked.append(1)
elif isHangul(var_list[i]): # Hangul character
checked.append(0)
else: # Non-hangul character
checked.append(-1)
return checked
def graph2phone(graphs):
# Encode graphemes as utf8
try:
graphs = graphs.decode('utf8')
except AttributeError:
pass
integers = []
for i in range(len(graphs)):
integers.append(ord(graphs[i]))
# Romanization (according to Korean Spontaneous Speech corpus; 성인자유발화코퍼스)
phones = ''
ONS = ['k0', 'kk', 'nn', 't0', 'tt', 'rr', 'mm', 'p0', 'pp',
's0', 'ss', 'oh', 'c0', 'cc', 'ch', 'kh', 'th', 'ph', 'h0']
NUC = ['aa', 'qq', 'ya', 'yq', 'vv', 'ee', 'yv', 'ye', 'oo', 'wa',
'wq', 'wo', 'yo', 'uu', 'wv', 'we', 'wi', 'yu', 'xx', 'xi', 'ii']
COD = ['', 'kf', 'kk', 'ks', 'nf', 'nc', 'nh', 'tf',
'll', 'lk', 'lm', 'lb', 'ls', 'lt', 'lp', 'lh',
'mf', 'pf', 'ps', 's0', 'ss', 'oh', 'c0', 'ch',
'kh', 'th', 'ph', 'h0']
# Pronunciation
idx = checkCharType(integers)
iElement = 0
while iElement < len(integers):
if idx[iElement] == 0: # not space characters
base = 44032
df = int(integers[iElement]) - base
iONS = int(math.floor(df / 588)) + 1
iNUC = int(math.floor((df % 588) / 28)) + 1
iCOD = int((df % 588) % 28) + 1
s1 = '-' + ONS[iONS - 1] # onset
s2 = NUC[iNUC - 1] # nucleus
if COD[iCOD - 1]: # coda
s3 = COD[iCOD - 1]
else:
s3 = ''
tmp = s1 + s2 + s3
phones = phones + tmp
elif idx[iElement] == 1: # space character
tmp = '#'
phones = phones + tmp
phones = re.sub('-(oh)', '-', phones)
iElement += 1
tmp = ''
    # Remove word-initial 'ieung' (the silent onset consonant)
phones = re.sub('^oh', '', phones)
phones = re.sub('-(oh)', '', phones)
    # Treat syllable-final 'ieung' as 'ng' (velar nasal in coda position)
phones = re.sub('oh-', 'ng-', phones)
phones = re.sub('oh([# ]|$)', 'ng', phones)
# Remove all characters except Hangul and syllable delimiter (hyphen; '-')
phones = re.sub('(\W+)\-', '\\1', phones)
phones = re.sub('\W+$', '', phones)
phones = re.sub('^\-', '', phones)
return phones
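# Worked example of the decomposition above: the syllable '간' is U+AC04
# (44036), so df = 44036 - 44032 = 4, giving iONS = 4 // 588 + 1 = 1 ('k0'),
# iNUC = (4 % 588) // 28 + 1 = 1 ('aa') and iCOD = 4 % 28 + 1 = 5 ('nf'),
# i.e. graph2phone('간') returns 'k0aanf'.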
def phone2prono(phones, rule_in, rule_out):
# Apply g2p rules
for pattern, replacement in zip(rule_in, rule_out):
# print pattern
phones = re.sub(pattern, replacement, phones)
prono = phones
return prono
def addPhoneBoundary(phones):
# Add a comma (,) after every second alphabets to mark phone boundaries
ipos = 0
newphones = ''
while ipos + 2 <= len(phones):
if phones[ipos] == u'-':
newphones = newphones + phones[ipos]
ipos += 1
elif phones[ipos] == u' ':
ipos += 1
elif phones[ipos] == u'#':
newphones = newphones + phones[ipos]
ipos += 1
newphones = newphones + phones[ipos] + phones[ipos+1] + u','
ipos += 2
return newphones
def addSpace(phones):
ipos = 0
newphones = ''
while ipos < len(phones):
if ipos == 0:
newphones = newphones + phones[ipos] + phones[ipos + 1]
else:
newphones = newphones + ' ' + phones[ipos] + phones[ipos + 1]
ipos += 2
return newphones
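# e.g. addPhoneBoundary('k0aanf') returns 'k0,aa,nf,' (two-letter phones
# become comma-separated, while '-' and '#' delimiters are copied through),
# and addSpace('k0aanf') returns 'k0 aa nf'.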
def graph2prono(graphs, rule_in, rule_out):
romanized = graph2phone(graphs)
romanized_bd = addPhoneBoundary(romanized)
prono = phone2prono(romanized_bd, rule_in, rule_out)
prono = re.sub(u',', u' ', prono)
prono = re.sub(u' $', u'', prono)
prono = re.sub(u'#', u'-', prono)
prono = re.sub(u'-+', u'-', prono)
prono_prev = prono
identical = False
loop_cnt = 1
if verbose == True:
print ('=> Romanized: ' + romanized)
print ('=> Romanized with boundaries: ' + romanized_bd)
print ('=> Initial output: ' + prono)
while not identical:
prono_new = phone2prono(re.sub(u' ', u',', prono_prev + u','), rule_in, rule_out)
prono_new = re.sub(u',', u' ', prono_new)
prono_new = re.sub(u' $', u'', prono_new)
if re.sub(u'-', u'', prono_prev) == re.sub(u'-', u'', prono_new):
identical = True
prono_new = re.sub(u'-', u'', prono_new)
if verbose == True:
print('\n=> Exhaustive rule application completed!')
print('=> Total loop count: ' + str(loop_cnt))
print('=> Output: ' + prono_new)
else:
if verbose == True:
print('\n=> Rule applied for more than once')
print('cmp1: ' + re.sub(u'-', u'', prono_prev))
print('cmp2: ' + re.sub(u'-', u'', prono_new))
loop_cnt += 1
prono_prev = prono_new
return prono_new
def testG2P(rulebook, testset):
[testin, testout] = readRules(ver_info[0], testset)
cnt = 0
body = []
for idx in range(0, len(testin)):
print('Test item #: ' + str(idx+1) + '/' + str(len(testin)))
item_in = testin[idx]
item_out = testout[idx]
ans = graph2phone(item_out)
ans = re.sub(u'-', u'', ans)
ans = addSpace(ans)
[rule_in, rule_out] = readRules(ver_info[0], rulebook)
pred = graph2prono(item_in, rule_in, rule_out)
if pred != ans:
print('G2P ERROR: [result] ' + pred + '\t\t\t[ans] ' + item_in + ' [' + item_out + '] ' + ans)
cnt += 1
else:
body.append('[result] ' + pred + '\t\t\t[ans] ' + item_in + ' [' + item_out + '] ' + ans)
print('Total error item #: ' + str(cnt))
writefile(body,'good.txt')
def runKoG2P(graph, rulebook):
[rule_in, rule_out] = readRules(ver_info[0], rulebook)
if ver_info[0] == 2:
prono = graph2prono(unicode(graph), rule_in, rule_out)
elif ver_info[0] == 3:
prono = graph2prono(graph, rule_in, rule_out)
print(prono)
def runTest(rulebook, testset):
print('[ G2P Performance Test ]')
beg = dt.datetime.now()
testG2P(rulebook, testset)
end = dt.datetime.now()
print('Total time: ')
print(end - beg)
# Usage:
if __name__ == '__main__':
if args[0] == 'test': # G2P Performance Test
runTest('rulebook.txt', 'testset.txt')
else:
graph = args[0]
runKoG2P(graph, 'rulebook.txt')
| 9,320 | 26.658754 | 107 | py |
class_DMDR | class_DMDR-master/CLASS_rename.py | # Script to change the names of CLASS modules (by Nils Schöneberg & Julien Lesgourgues)
#
# Can be used to:
# - rename module files, module prefixes, module structures, module structure acronyms
# - undo renaming
# - clean the generated log and backup files
#
# usage: CLASS_rename.py [-h] --method {rename,undo,clean} [-v | -q]
#
# optional arguments:
# -h, --help show this help message and exit
# --method {rename,undo,clean} rename / undo renaming / clean
# -v, --verbose Increase the verbosity of the program for more detailed output
# -q, --quiet Make the program entirely quiet, setting the verbosity to 0.
# Also disables the user confirmation, so use it carefully
#
# The actual renaming to be performed has to be set beforehand in the section below.
# Currently this is set for the transformation
# of CLASS v2.10.8 into CLASS v3.0.0 and backwards.
### EDIT ONLY BELOW ###
### EDIT ONLY BELOW ###
module_filename = ["thermodynamics","perturbations","nonlinear","transfer","spectra"]
module_prefix = ["thermodynamics","perturb","nonlinear","transfer","spectra"]
structure_longname = ["thermo","perturbs","nonlinear","transfers","spectra"]
structure_shortname = ["th","pt","nl","tr","sp"]
newmodule_filename = ["thermodynamics","perturbations","fourier","transfer","harmonic"]
newmodule_prefix = ["thermodynamics","perturbations","fourier","transfer","harmonic"]
newstructure_longname = ["thermodynamics","perturbations","fourier","transfer","harmonic"]
newstructure_shortname= ["th","pt","fo","tr","hr"]
# Potential problem: structure short names are just two
# letters. The same two-letter combination may
# appear by coincidence. Thus in some sub-cases we first
# check for exceptions.
# to identify these exception, for each short name (e.g. 'nl'), run:
#
# > grep "nl\." */*.c */*.h */*.py */*.pyx */*.pxd */*.ipynb */*.ini *.ini
#
# > grep "\&nl" */*.c */*.h */*.py */*.pyx */*.pxd */*.ipynb */*.ini *.ini
#
# and check whether some of the lines feature an nl that has nothing
# to do with the stucture short name. If yes, write the exception in
# the dictionary below.
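# A minimal Python sketch of the same search (illustrative only, never called;
# it assumes the script is run from the CLASS repository root and only scans
# .c/.h files, unlike the broader grep commands above):
def _find_shortname_candidates(short_name, folders=("source", "include")):
    import os
    import re
    hits = []
    for folder in folders:
        for root, _, fnames in os.walk(folder):
            for fname in fnames:
                if not fname.endswith((".c", ".h")):
                    continue
                path = os.path.join(root, fname)
                with open(path) as f:
                    for line in f:
                        # same patterns as the grep commands: 'nl.' and '&nl'
                        if re.search(re.escape(short_name) + r"\.", line) or ("&" + short_name) in line:
                            hits.append((path, line.rstrip()))
    return hits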
exceptions = {"th":[],
"pt":[],
"nl":["nl_corr","R_nl"],
"tr":[],
"sp":["osp.","resp"]}
prefix_exceptions = {"thermodynamics":[],
"perturbations":[],
"nonlinear":["nonlinear_method","nonlinear_scale","nonlinear_min_k_max"],
"transfer":[],
"spectra":[]}
src_folder = "source"
incl_folder = "include"
test_folder = "test"
### EDIT ONLY ABOVE ###
### EDIT ONLY ABOVE ###
import os
import argparse
# parse the arguments of the command line
parser = argparse.ArgumentParser(description='Change the names of CLASS modules')
parser.add_argument("--method",choices=["rename","undo","clean"], required=True,help="Do you want to rename / undo renaming / clean the generated log and backup files? Type 'rename','undo', or 'clean'")
# default verbosity is 1, can be increased with -v or decreased with -q
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="count",default=1,help="Increase the verbosity of the program for more detailed output")
group.add_argument("-q", "--quiet", action="store_true",help="Make the program entirely quiet, setting the verbosity to 0. This also disables the confirmation required by the user, so use it carefully")
parse_dict = parser.parse_args()
if parse_dict.quiet:
parse_dict.verbose = 0
# Inform the user about the starting of the actual routine
if parse_dict.verbose>0:
print("START RENAMING ROUTINE v.0.3 (credits Nils Schöneberg & Julien Lesgourgues)")
print("CHECKING ALL FILES IN DIRECTORY : "+os.path.abspath("."))
# Find the list of all the directories that we will parse and in which we will do changes in some files
#
# Get the list of all folders and subfolders in the local folder
# After this step, each element x is such that x[0] contains a folder name 'folder/subfolder/.../'
folder_list = [x for x in os.walk(".")]
# remove .git, doc, build folders
folder_list = [x for x in folder_list if not (".git" in x[0])]
#folder_list = [x for x in folder_list if not ("doc" in x[0])]
folder_list = [x for x in folder_list if not ("doc/manual" in x[0])]
folder_list = [x for x in folder_list if not ("doc/input/latex" in x[0])]
folder_list = [x for x in folder_list if not ("build" in x[0])]
# remove the folder of the RealSpaceInterface containing cached data
folder_list = [x for x in folder_list if not ("RealSpaceInterface/static" in x[0])]
# keep only the list of all folders, not the files they contain
folder_list = [x[0] for x in folder_list]
if parse_dict.verbose > 0:
# show the list of 'folder/subfolder/.../'
print("FOLDER LIST : "+" ".join(folder_list))
# let the user confirm or abort
read = input("Continue? (y/n)")
if not read.startswith("y"):
quit()
###############
# 'undo' mode #
###############
if parse_dict.method == "undo":
# For each changed module/file name, go back to old file names (e.g. 'fourier.c' -> 'nonlinear.c') such that they can be overwritten by the corresponding .old files
for i in range(len(module_filename)):
xf = module_filename[i]
yf = newmodule_filename[i]
os.rename(os.path.join(src_folder,yf+".c"),os.path.join(src_folder,xf+".c"))
os.rename(os.path.join(incl_folder,yf+".h"),os.path.join(incl_folder,xf+".h"))
os.rename(os.path.join(test_folder,"test_"+yf+".c"),os.path.join(test_folder,"test_"+xf+".c"))
if parse_dict.verbose > 0:
print("REVERTED TO MODULE NAME "+xf)
# find all folders containing .old and/or .unchanged files
for fldername in folder_list:
# First, get name of all files and subfolders in this folder
filelist_all = os.listdir(fldername)
filelist = []
for fname in filelist_all:
tmp_name = os.path.join(fldername, fname)
# remove subfolder names, keep only file names
if os.path.isdir(tmp_name):
continue
if tmp_name.endswith(".old"):
filelist.append(fname)
elif tmp_name.endswith(".unchanged"):
filelist.append(fname)
if parse_dict.verbose > 2:
print (fldername, filelist)
for filename in filelist:
# remove the log files *.unchanged
# (this can be done safely, as all relevant information is in the .old files)
if(".unchanged" in filename):
os.remove(os.path.join(fldername,filename))
# remove the .old extensions, thus overwriting the changed files with the old files
if(".old" in filename):
os.rename(os.path.join(fldername,filename),os.path.join(fldername,filename.replace(".old","")))
if parse_dict.verbose > 1:
print ("mv "+os.path.join(fldername,filename)+" "+os.path.join(fldername,filename.replace(".old",""))+"!")
if parse_dict.verbose > 0:
print ("IN "+fname+", DELETED .unchanged AND RESTORED ORIGINAL FROM .old FILES")
################
# 'clean' mode #
################
elif parse_dict.method == "clean":
# find all folders containing .old and/or .unchanged files
for fldername in folder_list:
# First, get name of all files and subfolders in this folder
filelist_all = os.listdir(fldername)
filelist = []
for fname in filelist_all:
tmp_name = os.path.join(fldername, fname)
# remove subfolder names, keep only file names
if os.path.isdir(tmp_name):
continue
if tmp_name.endswith(".old"):
filelist.append(fname)
elif tmp_name.endswith(".unchanged"):
filelist.append(fname)
if parse_dict.verbose > 2:
print (fldername,filelist)
for filename in filelist:
# just remove any .unchanged or .old files
if(".unchanged" in filename or ".old" in filename):
os.remove(os.path.join(fldername,filename))
if parse_dict.verbose > 0:
print ("IN "+fname+", DELETED .unchanged AND .old FILES")
  # remove log files Makefile.old and possibly python/autosetup.py
  # (separate try blocks, so that a missing Makefile.old does not prevent
  # the removal of autosetup.py)
  try:
    os.remove("Makefile.old")
    if parse_dict.verbose > 0:
      print ("REMOVED Makefile.old")
  except OSError:
    pass
  try:
    os.remove(os.path.join("python","autosetup.py"))
    if parse_dict.verbose > 0:
      print ("REMOVED python/autosetup.py")
  except OSError:
    pass
#################
# 'rename' mode #
#################
elif parse_dict.method == "rename":
# Some operations only have to be done for the first iteration over all files
# One example of this is the generation of the backup .old files
# Thus, we keep track if this is our first iteration
first_loop = True
# loop over each module to be renamed/modified
for i in range(len(module_filename)):
xf = module_filename[i]
xp = module_prefix[i]
xsl = structure_longname[i]
xss = structure_shortname[i]
yf = newmodule_filename[i]
yp = newmodule_prefix[i]
ysl = newstructure_longname[i]
yss = newstructure_shortname[i]
if parse_dict.verbose > 0:
print("BEGIN RENAMING {} -> {}".format(xsl,ysl))
# Parse and possibly do changes in each file of the folder fldername
for fldername in folder_list:
# Establish the list of file to be parsed and possibly modified in this folder
# First, get name of all files and subfolders in this folder
filelist_all = os.listdir(fldername)
filelist = []
for fname in filelist_all:
tmp_name = os.path.join(fldername, fname)
# remove subfolder names, keep only file names
if os.path.isdir(tmp_name):
continue
# ignore the automatically generated python setup file
if "autosetup.py" in tmp_name:
continue
# take into account other files with extension .c, .py, .pyx, .pxd, .ipynb, .h, .ini
# but not the .py of the local (root) folder (and thus e.g. not this script!)
if tmp_name.endswith(".c"):
filelist.append(fname)
elif tmp_name.endswith(".py"):
if fldername != '.':
filelist.append(fname)
elif tmp_name.endswith(".pyx"):
filelist.append(fname)
elif tmp_name.endswith(".pxd"):
filelist.append(fname)
elif tmp_name.endswith(".ipynb"):
filelist.append(fname)
elif tmp_name.endswith(".h"):
filelist.append(fname)
elif tmp_name.endswith(".ini"):
filelist.append(fname)
elif tmp_name.endswith(".md"):
filelist.append(fname)
# show the list of file to be parsed and possibly modified in this folder
if parse_dict.verbose > 1:
print("WILL MODIFY ALL FILES IN FOLDER '{}': [".format(fldername)+",".join(filelist)+"]")
# iterate over all files in the current folder
for filename in filelist:
# open input file (with old names)
with open(os.path.join(fldername,filename),"r") as inf:
# open temporary output file (where we will subsititue the new names)
with open(os.path.join(fldername,filename+".tmp"),"w") as outf:
# open a log file with extension .unchanged where we will store lines that were not changed but should have, potentially (for visual inspection)
with open(os.path.join(fldername,filename+".unchanged"),"a") as unchf:
# iterate over each line in the input file
line = inf.readline()
while line:
# I. Treat lines where the full structure name appears, e.g. 'nonlinear'
if "struct "+xsl in line:
if "struct "+xsl+" "+xss in line:
# replace each structure declaration (e.g. 'struct nonlinear nl' -> 'struct fourier fo')
                    # we isolate this case because it is very useful to catch many occurrences of the structure short name (e.g. 'nl') already here
line = line.replace("struct "+xsl+" "+xss,"struct "+ysl+" "+yss)
else:
                    # replace other occurrences (e.g. 'struct nonlinear' -> 'struct fourier')
# Special care is needed here! Check that the next character is not a letter
# Thus we only allow for a small selection of relevant possibilities
for char in ['\t','\n',' ','*','`',';',':']:
line = line.replace("struct "+xsl+char,"struct "+ysl+char)
if "cdef "+xsl in line:
if "cdef "+xsl+" "+xss in line:
# replace each structure declaration (e.g. 'cdef nonlinear nl' -> 'cdef fourier fo')
                    # we isolate this case because it is very useful to catch many occurrences of the structure short name (e.g. 'nl') already here
line = line.replace("cdef "+xsl+" "+xss,"cdef "+ysl+" "+yss)
else:
                    # replace other occurrences (e.g. 'cdef nonlinear' -> 'cdef fourier')
line = line.replace("cdef "+xsl,"cdef "+ysl)
if xsl+" structure" in line:
line = line.replace(xsl+" structure",ysl+" structure")
# II. Treat lines where the module (= file) name appears
if xf.upper() in line:
# replace capitalized module name (e.g. '__NONLINEAR__' -> '__FOURIER__')
line = line.replace(xf.upper(),yf.upper())
if xf+".c" in line:
# replace full filename in the comments (e.g. 'nonlinear.c' --> 'fourier.c')
line = line.replace(xf+".c",yf+".c")
if xf+".h" in line:
# replace full filename in the comments (e.g. 'nonlinear.h' --> 'fourier.h')
line = line.replace(xf+".h",yf+".h")
if xf+" module" in line:
# replace full filename in the comments (e.g. 'nonlinear module' --> 'fourier module')
line = line.replace(xf+" module",yf+" module")
if "\""+xf+"\"" in line:
# replace full filename in quotation marks (e.g. '"nonlinear"' --> '"fourier"')
line = line.replace("\""+xf+"\"","\""+yf+"\"")
# III. Treat lines where the prefix appears
if xp+"_" in line:
# replace all prefix names (e.g. 'nonlinear' -> 'fourier')
# For all prefix exceptions, substitute the problematic string with 'xx'
for i,x in enumerate(prefix_exceptions[xp]):
if x in line:
line = line.replace(x,prefix_exceptions[xp][i].replace(xp,'xx'))
# Now replace all the corresponding names where the prefix appears
line = line.replace(xp+"_",yp+"_")
# Finally, re-substitute the original exception string instead of the 'xx'
for i,x in enumerate(prefix_exceptions[xp]):
if x.replace(xp,'xx') in line:
line = line.replace(x.replace(xp,'xx'),prefix_exceptions[xp][i])
# IV. Treat line where short structure name appears, e.g. 'nl'
if xss in line:
# replace pointers towards structure (e.g. 'pnl' -> 'pfo')
if "p"+xss in line:
line = line.replace("p"+xss,"p"+yss)
# replace structure addresses (e.g. '&nl' -> '&fo') and structure short names before dots (e.g. 'nl.error_message' -> 'fo.error_message')
if "&"+xss in line or xss+"." in line:
# For all exceptions, substitute the problematic string with 'xx'
for i,x in enumerate(exceptions[xss]):
if x in line:
line = line.replace(x,exceptions[xss][i].replace(xss,'xx'))
# Now replace all structure short names before dots and addresses
line = line.replace("&"+xss,"&"+yss)
line = line.replace(xss+".",yss+".")
# Finally, re-substitute the original exception string instead of the 'xx'
for i,x in enumerate(exceptions[xss]):
if x.replace(xss,'xx') in line:
line = line.replace(x.replace(xss,'xx'),exceptions[xss][i])
# replace structures as fields of bigger structures in python (e.g. 'self.nl' -> 'self.fo')
if "self."+xss in line:
line = line.replace("self."+xss,"self."+yss)
                  # if the line still contains the short name in other circumstances, print it in the log file .unchanged
if xss in line:
                    # Mark the occurrence of the short name by arrows (e.g. 'only' -> 'o-->nl<--y')
unchf.write(line.replace(xss,"-->"+xss+"<--"))
# write the line (changed or not) in the temporary output file
outf.write(line)
line = inf.readline()
# keep the input file but add to it an extension .old (so we keep it as a backup, if something goes wrong) e.g. nonlinear.c -> nonlinear.c.old
# This is done only in the first loop over modules.
if first_loop == True:
os.rename(os.path.join(fldername,filename),os.path.join(fldername,filename+".old"))
# give to the temporary output file name its final extension (e.g. 'nonlinear.c.tmp' -> 'nonlinear.c')
os.rename(os.path.join(fldername,filename+".tmp"),os.path.join(fldername,filename))
if parse_dict.verbose > 1:
print("SUCCESS IN FOLDER {}".format(fldername))
# work on the Makefile
if parse_dict.verbose>1:
print("MODIFY MAKEFILE")
with open("Makefile","r") as inf:
# implement the changes in Makefile.tmp
with open("Makefile.tmp","w") as outf:
line = inf.readline()
while line:
# replace long names (e.g. 'nonlinear' -> 'fourier')
if xf in line:
line = line.replace(xf,yf)
# replace long names when capitalized
if xf.upper() in line:
line = line.replace(xf.upper(),yf.upper())
outf.write(line)
line = inf.readline()
# keep old version with additional .old extension
if first_loop == True:
os.rename("Makefile","Makefile.old")
# rename Makefile.tmp -> Makefile
os.rename("Makefile.tmp","Makefile")
if parse_dict.verbose>1:
print("SUCCESS IN MODIFYING MAKEFILE")
# change actual file names (e.g. 'nonlinear.c' -> 'fourier.c')
if parse_dict.verbose>1:
print("RENAME MODULE "+yf)
os.rename(os.path.join(src_folder,xf+".c"),os.path.join(src_folder,yf+".c"))
os.rename(os.path.join(incl_folder,xf+".h"),os.path.join(incl_folder,yf+".h"))
os.rename(os.path.join(test_folder,"test_"+xf+".c"),os.path.join(test_folder,"test_"+yf+".c"))
if parse_dict.verbose>1:
print("SUCCESS IN RENAMING MODULE "+yf)
if parse_dict.verbose > 0:
print("SUCCESS FOR RENAMING {} -> {}".format(xf,yf))
# done for this particular module
first_loop = False
  # end of loop over modules
if parse_dict.verbose>0:
print("SUCCESS!")
| 19,182 | 43.611628 | 202 | py |
class_DMDR | class_DMDR-master/CPU.py | #!/usr/bin/env python
"""
.. module:: CPU
:synopsis: CPU, a CLASS Plotting Utility
.. moduleauthor:: Benjamin Audren <benjamin.audren@gmail.com>
.. credits:: Benjamin Audren, Jesus Torrado
.. version:: 2.0
This is a small python program aimed to gain time when comparing two spectra,
e.g. from CAMB and CLASS, or a non-linear spectrum to a linear one.
It is designed to be used in a command line fashion, not being restricted to
your CLASS directory, though it recognizes mainly CLASS output format. Far from
perfect, or complete, it could use any suggestion for enhancing it,
just to avoid losing time on useless matters for others.
Be warned that, when comparing with other format, the following is assumed:
there are no empty line (especially at the end of file). Gnuplot comment lines
(starting with a # are allowed). This issue will cause a non-very descriptive
error in CPU, any suggestion for testing it is welcome.
Example of use:
- To superimpose two different spectra and see their global shape :
python CPU.py output/lcdm_z2_pk.dat output/lncdm_z2_pk.dat
- To see in details their ratio:
python CPU.py output/lcdm_z2_pk.dat output/lncdm_z2_pk.dat -r
The "PlanckScale" is taken with permission from Jesus Torrado's:
cosmo_mini_toolbox, available under GPLv3 at
https://github.com/JesusTorrado/cosmo_mini_toolbox
"""
from __future__ import unicode_literals, print_function
# System imports
import os
import sys
import argparse
# Numerics
import numpy as np
from numpy import ma
from scipy.interpolate import InterpolatedUnivariateSpline
from math import floor
# Plotting
import matplotlib.pyplot as plt
from matplotlib import scale as mscale
from matplotlib.transforms import Transform
from matplotlib.ticker import FixedLocator
def CPU_parser():
parser = argparse.ArgumentParser(
description=(
'CPU, a CLASS Plotting Utility, specify wether you want\n'
'to superimpose, or plot the ratio of different files.'),
epilog=(
'A standard usage would be, for instance:\n'
'python CPU.py output/test_pk.dat output/test_pk_nl_density.dat'
' -r\npython CPU.py output/wmap_cl.dat output/planck_cl.dat'),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'files', type=str, nargs='*', help='Files to plot')
parser.add_argument('-r', '--ratio', dest='ratio', action='store_true',
help='Plot the ratio of the spectra')
parser.add_argument('-y', '--y-axis', dest='y_axis', nargs='+',
help='specify the fields you want to plot.')
parser.add_argument('-x', '--x-axis', dest='x_axis', type=str,
help='specify the field to be used on the x-axis')
parser.add_argument('--scale', type=str,
choices=['lin', 'loglog', 'loglin', 'george'],
help='Specify the scale to use for the plot')
parser.add_argument('--xlim', dest='xlim', nargs='+', type=float,
default=[], help='Specify the x range')
parser.add_argument('--ylim', dest='ylim', nargs='+', type=float,
default=[], help='Specify the y range')
    parser.add_argument(
        '-p', '--print',
        dest='printfile', default='',
help=('print the graph directly in a file. If no name is specified, it'
'uses the name of the first input file'))
parser.add_argument(
'--repeat',
dest='repeat', action='store_true', default=False,
help='repeat the step for all redshifts with same base name')
return parser
def plot_CLASS_output(files, x_axis, y_axis, ratio=False, printing='',
output_name='', extension='', x_variable='',
scale='lin', xlim=[], ylim=[]):
"""
Load the data to numpy arrays, write all the commands for plotting to a
Python script for further refinment, and display them.
Inspired heavily by the matlab version by Thomas Tram
Parameters
----------
files : list
List of files to plot
x-axis : string
name of the column to use as the x coordinate
y-axis : list, str
List of items to plot, which should match the way they appear in the
file, for instance: ['TT', 'BB]
Keyword Arguments
-----------------
ratio : bool
If set to yes, plots the ratio of the files, taking as a reference the
first one
output_name : str
Specify a different name for the produced figure (by default, it takes
the name of the first file, and replace the .dat by .pdf)
extension : str
"""
# Define the python script name, and the pdf path
python_script_path = os.path.splitext(files[0])[0]+'.py'
# The variable text will contain all the lines to be printed in the end to
# the python script path, joined with newline characters. Beware of the
# indentation.
text = ['import matplotlib.pyplot as plt',
'import numpy as np',
'import itertools', '']
# Load all the graphs
data = []
for data_file in files:
data.append(np.loadtxt(data_file))
# Create the full_path_files list, that contains the absolute path, so that
# the future python script can import them directly.
full_path_files = [os.path.abspath(elem) for elem in files]
text += ['files = %s' % full_path_files]
text += ['data = []',
'for data_file in files:',
' data.append(np.loadtxt(data_file))']
# Recover the base name of the files, everything before the dot
roots = [elem.split(os.path.sep)[-1].split('.')[0] for elem in files]
text += ['roots = [%s]' % ', '.join(["'%s'" % root for root in roots])]
# Create the figure and ax objects
fig, ax = plt.subplots()
text += ['', 'fig, ax = plt.subplots()']
# if ratio is not set, then simply plot them all
original_y_axis = y_axis
legend = []
if not ratio:
for index, curve in enumerate(data):
# Recover the number of columns in the first file, as well as their
# title.
num_columns, names, tex_names = extract_headers(files[index])
text += ['', 'index, curve = %i, data[%i]' % (index, index)]
# Check if everything is in order
if num_columns == 2:
y_axis = [names[1]]
elif num_columns > 2:
# in case y_axis was only a string, cast it to a list
if isinstance(original_y_axis, str):
y_axis = [original_y_axis]
else:
y_axis = original_y_axis
# Store the selected text and tex_names to the script
selected = []
for elem in y_axis:
selected.extend(
[name for name in names if name.find(elem) != -1 and
name not in selected])
if not y_axis:
selected = names[1:]
y_axis = selected
# Decide for the x_axis, by default the index will be set to zero
x_index = 0
if x_axis:
for index_name, name in enumerate(names):
if name.find(x_axis) != -1:
x_index = index_name
break
# Store to text
text += ['y_axis = %s' % selected]
text += ['tex_names = %s' % [elem for (elem, name) in
zip(tex_names, names) if name in selected]]
text += ["x_axis = '%s'" % tex_names[x_index]]
text += ["ylim = %s" % ylim]
text += ["xlim = %s" % xlim]
for selec in y_axis:
index_selec = names.index(selec)
plot_line = 'ax.'
if scale == 'lin':
plot_line += 'plot(curve[:, %i], curve[:, %i])' % (
x_index, index_selec)
ax.plot(curve[:, x_index], curve[:, index_selec])
elif scale == 'loglog':
plot_line += 'loglog(curve[:, %i], abs(curve[:, %i]))' % (
x_index, index_selec)
ax.loglog(curve[:, x_index], abs(curve[:, index_selec]))
elif scale == 'loglin':
plot_line += 'semilogx(curve[:, %i], curve[:, %i])' % (
x_index, index_selec)
ax.semilogx(curve[:, x_index], curve[:, index_selec])
elif scale == 'george':
plot_line += 'plot(curve[:, %i], curve[:, %i])' % (
x_index, index_selec)
ax.plot(curve[:, x_index], curve[:, index_selec])
ax.set_xscale('planck')
text += [plot_line]
legend.extend([roots[index]+': '+elem for elem in y_axis])
ax.legend(legend, loc='best')
text += ["",
"ax.legend([root+': '+elem for (root, elem) in",
" itertools.product(roots, y_axis)], loc='best')",
""]
else:
ref = data[0]
num_columns, ref_curve_names, ref_tex_names = extract_headers(files[0])
# Check if everything is in order
if num_columns == 2:
y_axis_ref = [ref_curve_names[1]]
elif num_columns > 2:
# in case y_axis was only a string, cast it to a list
if isinstance(original_y_axis, str):
y_axis_ref = [original_y_axis]
else:
y_axis_ref = original_y_axis
# Store the selected text and tex_names to the script
selected = []
for elem in y_axis_ref:
selected.extend([name for name in ref_curve_names if name.find(elem) != -1 and
name not in selected])
y_axis_ref = selected
# Decide for the x_axis, by default the index will be set to zero
x_index_ref = 0
if x_axis:
for index_name, name in enumerate(ref_curve_names):
if name.find(x_axis) != -1:
x_index_ref = index_name
break
for idx in range(1, len(data)):
current = data[idx]
num_columns, names, tex_names = extract_headers(files[idx])
# Check if everything is in order
if num_columns == 2:
y_axis = [names[1]]
elif num_columns > 2:
# in case y_axis was only a string, cast it to a list
if isinstance(original_y_axis, str):
y_axis = [original_y_axis]
else:
y_axis = original_y_axis
# Store the selected text and tex_names to the script
selected = []
for elem in y_axis:
selected.extend([name for name in names if name.find(elem) != -1 and
name not in selected])
y_axis = selected
text += ['y_axis = %s' % selected]
text += ['tex_names = %s' % [elem for (elem, name) in
zip(tex_names, names) if name in selected]]
# Decide for the x_axis, by default the index will be set to zero
x_index = 0
if x_axis:
for index_name, name in enumerate(names):
if name.find(x_axis) != -1:
x_index = index_name
break
text += ["x_axis = '%s'" % tex_names[x_index]]
for selec in y_axis:
# Do the interpolation
axis = ref[:, x_index_ref]
reference = ref[:, ref_curve_names.index(selec)]
#plt.loglog(current[:, x_index], current[:, names.index(selec)])
#plt.show()
#interpolated = splrep(current[:, x_index],
#current[:, names.index(selec)])
interpolated = InterpolatedUnivariateSpline(current[:, x_index],
current[:, names.index(selec)])
if scale == 'lin':
#ax.plot(axis, splev(ref[:, x_index_ref],
#interpolated)/reference-1)
ax.plot(axis, interpolated(ref[:, x_index_ref])/reference-1)
elif scale == 'loglin':
#ax.semilogx(axis, splev(ref[:, x_index_ref],
#interpolated)/reference-1)
ax.semilogx(axis, interpolated(ref[:, x_index_ref])/reference-1)
elif scale == 'loglog':
raise InputError(
"loglog plot is not available for ratios")
    if 'TT' in names:
        ax.set_xlabel(r'$\ell$', fontsize=16)
        text += [r"ax.set_xlabel(r'$\ell$', fontsize=16)"]
elif 'P' in names:
ax.set_xlabel('$k$ [$h$/Mpc]', fontsize=16)
text += ["ax.set_xlabel('$k$ [$h$/Mpc]', fontsize=16)"]
else:
ax.set_xlabel(tex_names[x_index], fontsize=16)
text += ["ax.set_xlabel('%s', fontsize=16)" % tex_names[x_index]]
if xlim:
if len(xlim) > 1:
ax.set_xlim(xlim)
text += ["ax.set_xlim(xlim)"]
else:
ax.set_xlim(xlim[0])
text += ["ax.set_xlim(xlim[0])"]
ax.set_ylim()
text += ["ax.set_ylim()"]
if ylim:
if len(ylim) > 1:
ax.set_ylim(ylim)
text += ["ax.set_ylim(ylim)"]
else:
ax.set_ylim(ylim[0])
text += ["ax.set_ylim(ylim[0])"]
text += ['plt.show()']
plt.show()
    # If the user wants to print the figure to a file
if printing:
fig.savefig(printing)
text += ["fig.savefig('%s')" % printing]
# Write to the python file all the issued commands. You can then reproduce
# the plot by running "python output/something_cl.dat.py"
with open(python_script_path, 'w') as python_script:
print('Creating a python script to reproduce the figure')
print('--> stored in %s' % python_script_path)
python_script.write('\n'.join(text))
    # If the user wants to print the figure to a file
if printing:
fig.savefig(printing)
class FormatError(Exception):
"""Format not recognised"""
pass
# named to avoid shadowing the built-in TypeError
class SpectrumTypeError(Exception):
"""Spectrum type not recognised"""
pass
class NumberOfFilesError(Exception):
"""Invalid number of files"""
pass
class InputError(Exception):
"""Incompatible input requirements"""
pass
def replace_scale(string):
"""
This assumes that the string starts with "(.)", which will be replaced by
(8piG/3)
>>> print replace_scale('(.)toto')
>>> '(8\\pi G/3)toto'
"""
string_list = list(string)
string_list.pop(1)
string_list[1:1] = list('8\\pi G/3')
return ''.join(string_list)
def process_long_names(long_names):
"""
Given the names extracted from the header, return two arrays, one with the
short version, and one tex version
>>> names, tex_names = process_long_names(['(.)toto', 'proper time [Gyr]'])
>>> print names
>>> ['toto', 'proper time']
>>> print tex_names
>>> ['(8\\pi G/3)toto, 'proper time [Gyr]']
"""
names = []
tex_names = []
# First pass, to remove the leading scales
for name in long_names:
# This can happen in the background file
if name.startswith('(.)', 0):
temp_name = name[3:]
names.append(temp_name)
tex_names.append(replace_scale(name))
# Otherwise, we simply
else:
names.append(name)
tex_names.append(name)
# Finally, remove any extra spacing
names = [''.join(elem.split()) for elem in names]
return names, tex_names
def extract_headers(header_path):
with open(header_path, 'r') as header_file:
header = [line for line in header_file if line[0] == '#']
header = header[-1]
# Count the number of columns in the file, and recover their name. Thanks
# Thomas Tram for the trick
indices = [i+1 for i in range(len(header)) if
header.startswith(':', i)]
num_columns = len(indices)
long_names = [header[indices[i]:indices[(i+1)]-3].strip() if i < num_columns-1
else header[indices[i]:].strip()
for i in range(num_columns)]
# Process long_names further to handle special cases, and extract names,
# which will correspond to the tags specified in "y_axis".
names, tex_names = process_long_names(long_names)
return num_columns, names, tex_names
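# Example sketch: for a CLASS-style header line such as
#   "# 1:k (h/Mpc)      2:P (Mpc/h)^3"
# (hypothetical content), extract_headers returns num_columns = 2,
# names = ['k(h/Mpc)', 'P(Mpc/h)^3'] (internal spacing stripped) and
# tex_names = ['k (h/Mpc)', 'P (Mpc/h)^3'].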
def main():
print('~~~ Running CPU, a CLASS Plotting Utility ~~~')
parser = CPU_parser()
# Parse the command line arguments
args = parser.parse_args()
# if there are no argument in the input, print usage
if len(args.files) == 0:
parser.print_usage()
return
# if the first file name contains cl or pk, infer the type of desired
# spectrum
if not args.y_axis:
if args.files[0].rfind('cl') != -1:
scale = 'loglog'
elif args.files[0].rfind('pk') != -1:
scale = 'loglog'
else:
scale = 'lin'
args.y_axis = []
else:
scale = ''
if not args.scale:
if scale:
args.scale = scale
else:
args.scale = 'lin'
# Remove extra spacing in the y_axis list
args.y_axis = [''.join(elem.split()) for elem in args.y_axis]
# If ratio is asked, but only one file was passed in argument, politely
# complain
if args.ratio:
if len(args.files) < 2:
raise NumberOfFilesError(
"If you want me to compute a ratio between two files, "
"I strongly encourage you to give me at least two of them.")
# actual plotting. By default, a simple superposition of the graph is
# performed. If asked to be divided, the ratio is shown - whether a need
# for interpolation arises or not.
if args.ratio and args.scale == 'loglog':
print("Defaulting to loglin scale")
args.scale = 'loglin'
plot_CLASS_output(args.files, args.x_axis, args.y_axis,
ratio=args.ratio, printing=args.printfile,
scale=args.scale, xlim=args.xlim, ylim=args.ylim)
# Helper code from cosmo_mini_toolbox, by Jesus Torrado, available fully at
# https://github.com/JesusTorrado/cosmo_mini_toolbox, to use the log then
# linear scale for the multipole axis when plotting Cl.
nonpos = "mask"
change = 50.0
factor = 500.
def _mask_nonpos(a):
"""
    Return a Numpy masked array where all non-positive values are
    masked. If there are no non-positive values, the original array
    is returned.
"""
mask = a <= 0.0
if mask.any():
return ma.MaskedArray(a, mask=mask)
return a
def _clip_nonpos(a):
a[a <= 0.0] = 1e-300
return a
class PlanckScale(mscale.ScaleBase):
"""
Scale used by the Planck collaboration to plot Temperature power spectra:
base-10 logarithmic up to l=50, and linear from there on.
Care is taken so non-positive values are not plotted.
"""
name = 'planck'
def __init__(self, axis, **kwargs):
pass
def set_default_locators_and_formatters(self, axis):
axis.set_major_locator(
FixedLocator(
np.concatenate((np.array([2, 10, change]),
np.arange(500, 2500, 500)))))
axis.set_minor_locator(
FixedLocator(
np.concatenate((np.arange(2, 10),
np.arange(10, 50, 10),
np.arange(floor(change/100), 2500, 100)))))
def get_transform(self):
"""
Return a :class:`~matplotlib.transforms.Transform` instance
appropriate for the given logarithm base.
"""
return self.PlanckTransform(nonpos)
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Limit the domain to positive values.
"""
return (vmin <= 0.0 and minpos or vmin,
vmax <= 0.0 and minpos or vmax)
class PlanckTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def __init__(self, nonpos):
Transform.__init__(self)
if nonpos == 'mask':
self._handle_nonpos = _mask_nonpos
else:
self._handle_nonpos = _clip_nonpos
def transform_non_affine(self, a):
lower = a[np.where(a<=change)]
greater = a[np.where(a> change)]
if lower.size:
lower = self._handle_nonpos(lower * 10.0)/10.0
if isinstance(lower, ma.MaskedArray):
lower = ma.log10(lower)
else:
lower = np.log10(lower)
lower = factor*lower
if greater.size:
greater = (factor*np.log10(change) + (greater-change))
# Only low
if not(greater.size):
return lower
# Only high
if not(lower.size):
return greater
return np.concatenate((lower, greater))
def inverted(self):
return PlanckScale.InvertedPlanckTransform()
class InvertedPlanckTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def transform_non_affine(self, a):
lower = a[np.where(a<=factor*np.log10(change))]
greater = a[np.where(a> factor*np.log10(change))]
if lower.size:
if isinstance(lower, ma.MaskedArray):
lower = ma.power(10.0, lower/float(factor))
else:
lower = np.power(10.0, lower/float(factor))
if greater.size:
greater = (greater + change - factor*np.log10(change))
# Only low
if not(greater.size):
return lower
# Only high
if not(lower.size):
return greater
return np.concatenate((lower, greater))
def inverted(self):
return PlanckTransform()
# Finished. Register the scale!
mscale.register_scale(PlanckScale)
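# Once registered, the scale is selected like any built-in one, e.g.
# ax.set_xscale('planck'), which is what plot_CLASS_output does above
# when scale == 'george'.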
if __name__ == '__main__':
sys.exit(main())
| 22,565 | 35.221509 | 90 | py |
class_DMDR | class_DMDR-master/test_python.py | from classy import Class
| 25 | 12 | 24 | py |
class_DMDR | class_DMDR-master/external/external_Pk/generate_Pk_example_w_tensors.py | #!/usr/bin/python
from __future__ import print_function
import sys
from math import exp
# README:
#
# This is an example python script for the external_Pk mode of Class.
# It generates the primordial spectrum of LambdaCDM.
# It can be edited and used directly, though keeping a copy of it is recommended.
#
# Two (maybe three) things need to be edited:
#
# 1. The names of the parameters needed for the calculation of Pk.
# "sys.argv[1]" corresponds to "custom1" in Class, and so on
try :
k_0 = float(sys.argv[1])
A_s = float(sys.argv[2])
n_s = float(sys.argv[3])
A_t = float(sys.argv[4])
n_t = float(sys.argv[5])
# Error control, no need to touch
except IndexError :
raise IndexError("It seems you are calling this script with too few arguments.")
except ValueError :
raise ValueError("It seems some of the arguments are not correctly formatted. "+
"Remember that they must be floating point numbers.")
# 2. The function giving P(k), including the necessary import statements.
# Inside this function, you can use the parameters named in the previous step.
def P_s(k) :
return A_s * (k/k_0)**(n_s-1.)
def P_t(k) :
return A_t * (k/k_0)**(n_t)
# 3. Limits for k and precision:
# Check that the boundaries are correct for your case.
# It is safer to set k_per_decade_primordial slightly bigger than Class's k_per_decade_for_pk.
k_min = 1.e-6
k_max = 10.
k_per_decade_primordial = 200.
#
# And nothing should need to be edited from here on.
#
# Filling the array of k's
ks = [float(k_min)]
while ks[-1] <= float(k_max) :
ks.append(ks[-1]*10.**(1./float(k_per_decade_primordial)))
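# Equivalent vectorized construction (a sketch, left commented out since this
# script deliberately avoids a numpy dependency; the treatment of the last
# point may differ by one sample from the loop above):
# import numpy as np
# n = int(np.ceil(np.log10(k_max/k_min)*k_per_decade_primordial)) + 1
# ks = k_min * 10.0**(np.arange(n)/k_per_decade_primordial)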
# Filling the array of Pk's
for k in ks :
print("%.18g %.18g %.18g" % (k, P_s(k), P_t(k)))
| 1,792 | 28.393443 | 84 | py |
class_DMDR | class_DMDR-master/external/external_Pk/generate_Pk_example.py | #!/usr/bin/python
from __future__ import print_function
import sys
from math import exp
# README:
#
# This is an example python script for the external_Pk mode of Class.
# It generates the primordial spectrum of LambdaCDM.
# It can be edited and used directly, though keeping a copy of it is recommended.
#
# Two (maybe three) things need to be edited:
#
# 1. The names of the parameters needed for the calculation of Pk.
# "sys.argv[1]" corresponds to "custom1" in Class, and so on
try :
k_0 = float(sys.argv[1])
A = float(sys.argv[2])
n_s = float(sys.argv[3])
# Error control, no need to touch
except IndexError :
raise IndexError("It seems you are calling this script with too few arguments.")
except ValueError :
raise ValueError("It seems some of the arguments are not correctly formatted. "+
"Remember that they must be floating point numbers.")
# 2. The function giving P(k), including the necessary import statements.
# Inside this function, you can use the parameters named in the previous step.
def P(k) :
return A * (k/k_0)**(n_s-1.)
# 3. Limits for k and precision:
# Check that the boundaries are correct for your case.
# It is safer to set k_per_decade_primordial slightly bigger than Class's k_per_decade_for_pk.
k_min = 1.e-6
k_max = 10.
k_per_decade_primordial = 200.
#
# And nothing should need to be edited from here on.
#
# Filling the array of k's
ks = [float(k_min)]
while ks[-1] <= float(k_max) :
ks.append(ks[-1]*10.**(1./float(k_per_decade_primordial)))
# Filling the array of Pk's
for k in ks :
P_k = P(k)
print("%.18g %.18g" % (k, P_k))
| 1,662 | 28.175439 | 84 | py |
class_DMDR | class_DMDR-master/external/distortions/generate_PCA_files.py | #!/usr/bin/env python
import numpy as np
import sys
import scipy.interpolate as sciint
from numpy.linalg import norm as vector_norm
from numpy.linalg import eigh as eigen_vals_vecs
import os
import matplotlib.pyplot as plt
# Read inputs
if(len(sys.argv)==14):
sd_detector_name = sys.argv[1]
sd_detector_nu_min = eval(sys.argv[2])
sd_detector_nu_max = eval(sys.argv[3])
sd_detector_nu_delta = eval(sys.argv[4])
sd_detector_bin_number = eval(sys.argv[5])
sd_z_min = eval(sys.argv[6])
sd_z_max = eval(sys.argv[7])
sd_z_size = eval(sys.argv[8])
sd_detector_delta_Ic = eval(sys.argv[9])
sd_PCA_size = eval(sys.argv[10])
z_th = eval(sys.argv[11])
DI_units = eval(sys.argv[12]) # = 2.70062634e-18
x_to_nu = eval(sys.argv[13]) # = 56.7798
has_noisefile = False
elif(len(sys.argv)==11):
sd_detector_name = sys.argv[1]
sd_external_path = sys.argv[2]
sd_noisefile_name = sys.argv[3]
sd_z_min = eval(sys.argv[4])
sd_z_max = eval(sys.argv[5])
sd_z_size = eval(sys.argv[6])
sd_PCA_size = eval(sys.argv[7])
z_th = eval(sys.argv[8])
DI_units = eval(sys.argv[9]) # = 2.70062634e-18
x_to_nu = eval(sys.argv[10]) # = 56.7798
has_noisefile = True
else:
raise Exception("generate_PCA_files.py received invalid input arguments")
def PCA_string_to_array(line,delimiter=" "):
line = line.replace("\n","")
    if delimiter != "\t":
        line = line.replace("\t","")
    return np.array([float(x) for x in line.split(delimiter) if (x != "" and x != " ")])
def read_noisefile(filename):
with open(filename) as det_noise:
header = True
while(header):
line = det_noise.readline()
if(line.startswith("#")):
continue
header=False
Nrows,Ncols = PCA_string_to_array(line)
    # skip the first line containing Nrows,Ncols
line = det_noise.readline()
cols = []
while(line):
cols.append(PCA_string_to_array(line))
line = det_noise.readline()
cols = np.array(cols).T
assert(int(Ncols)==len(cols))
assert(int(Nrows)==len(cols[0]))
return len(cols[0]),cols[0]/x_to_nu,cols[1]*1e-26
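# Layout of the noise file expected by read_noisefile, inferred from the
# parser above (the physical units are an assumption; the code itself only
# rescales the columns):
#   # optional comment lines starting with '#'
#   <Nrows> <Ncols>
#   <frequency> <delta_Ic>        (Nrows such rows, Ncols columns)
# Column 0 is converted to the dimensionless x via x_to_nu and column 1 is
# multiplied by 1e-26.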
dir_path = os.path.dirname(os.path.realpath(__file__))
# Read external file Greens_data.dat
readfile = "Greens_data.dat"
with open(os.path.join(dir_path,readfile)) as f:
# Read the header first
header = True
while(header):
line = f.readline()
if(line.startswith("#")):
continue
# The first line of the header without the "#" is still part of the header
header=False
# Read the first line specifying z
Greens_z = PCA_string_to_array(f.readline())
Greens_Nz = len(Greens_z)
Greens_lnz = np.log(Greens_z+1.)
# Read T_ini,T_last and rho
Greens_T_ini = PCA_string_to_array(f.readline())
Greens_T_last = PCA_string_to_array(f.readline())
Greens_drho = PCA_string_to_array(f.readline())
# Calculate the difference in Temperature
Greens_dT = (Greens_T_last-Greens_T_ini)/Greens_T_ini
# Read the rest of the file
done = False
Greens_data_full = []
while(not done):
line = f.readline()
if(not line):
done = True
else:
Greens_data_full.append(PCA_string_to_array(line))
Greens_data_full = np.array(Greens_data_full).T
# Seperate the rest of the data into x, Green(z,x) and the blackbody
Greens_x = Greens_data_full[0]
Greens_Nx = len(Greens_x)
Greens_G_th = Greens_data_full[1:Greens_Nz+1]
Greens_blackbody = Greens_data_full[Greens_Nz+1]
# Spline Greens function for interpolation
Greens_G_th_Spline = [None for index_x_old in range(Greens_Nx)]
for index_x_old in range(Greens_Nx):
Greens_G_th_Spline[index_x_old] = sciint.CubicSpline(Greens_lnz,Greens_G_th[:,index_x_old])
# Spline Greens dT for interpolation
Greens_T_ini_Spline = sciint.CubicSpline(Greens_lnz,Greens_T_ini)
Greens_T_last_Spline = sciint.CubicSpline(Greens_lnz,Greens_T_last)
Greens_dT_Spline = sciint.CubicSpline(Greens_lnz,Greens_dT)
Greens_drho_Spline = sciint.CubicSpline(Greens_lnz,Greens_drho)
# Define new z and x arrays
Nz_arr = sd_z_size
z_arr = np.logspace(np.log10(sd_z_min),np.log10(sd_z_max),Nz_arr)
lnz_arr = np.log(z_arr+1.)
if has_noisefile:
Nx_arr,x_arr,deltaIc_arr = read_noisefile(os.path.join(sd_external_path,sd_noisefile_name))
else:
Nx_arr = sd_detector_bin_number+1
x_arr = np.linspace(sd_detector_nu_min/x_to_nu,sd_detector_nu_max/x_to_nu,Nx_arr)
# Define visibility function
#bb_vis = np.exp(-(z_arr/2.021e6)**2.5)
bb_vis = np.exp(-(z_arr/z_th)**2.5)
# The Gth file of Chluba subtracts away some part of the G_T distortion into a shift from T_ini to T_last
# Here we calculate backwards, and obtain the shift of f_g due to the internal dT
df_g = Greens_dT_Spline(lnz_arr)/Greens_drho_Spline(lnz_arr)
# Initialize spectral shapes
G_th = np.zeros((Nx_arr,Nz_arr))
DI_T_shift = np.zeros((Nx_arr,Nz_arr))
Gdist = np.zeros(Nx_arr)
Ydist = np.zeros(Nx_arr)
Mdist = np.zeros(Nx_arr)
# Interpolate Green's function
index_x_old = 0
for index_x_new,x in enumerate(x_arr):
# Define spectral shapes
Gdist[index_x_new] = (x**4*np.exp(x)/(np.exp(x)-1)**2)*DI_units*1.0e18
Ydist[index_x_new] = Gdist[index_x_new]*(x/np.tanh(x/2.)-4.)
Mdist[index_x_new] = Gdist[index_x_new]*(1./2.19229-1./x)
x_s = Greens_T_ini_Spline(lnz_arr)/Greens_T_last_Spline(lnz_arr)*x
    x_z = x*np.ones_like(lnz_arr) # broadcast the scalar x to the shape of lnz_arr
DI_T_shift[index_x_new,:] = DI_units*1.0e26*x_z**3.*(np.exp(-x_s)/(1.-np.exp(-x_s))-np.exp(-x_z)/(1.-np.exp(-x_z)))/Greens_drho_Spline(lnz_arr)
try:
# Find position in xarray
while(x>Greens_x[index_x_old]):
index_x_old += 1
# Linear interpolation in x
frac = (x-Greens_x[index_x_old])/(Greens_x[index_x_old+1]-Greens_x[index_x_old])
# Cubic interpolation for all values of z
lowx_vals = Greens_G_th_Spline[index_x_old](lnz_arr)
highx_vals = Greens_G_th_Spline[index_x_old+1](lnz_arr)
G_th[index_x_new,:] = (lowx_vals*(1.-frac)+highx_vals*frac)
G_th[index_x_new,:] *= bb_vis*1.e-8
G_th[index_x_new,:] += DI_T_shift[index_x_new,:]*1.e-8
#G_th[index_x_new,:] += Gdist[index_x_new]*df_g
except:
raise ValueError("{} is not in the file range [{},{}] for file '{}'".format(x,Greens_x[0],Greens_x[-1],readfile))
# Begin orthonormlization
# Y distortion
e_Y = Ydist/vector_norm(Ydist)
M_Y = np.dot(e_Y,Mdist)
G_Y = np.dot(e_Y,Gdist)
# Mu distortion
Mperp = Mdist-M_Y*e_Y
e_M = Mperp/vector_norm(Mperp)
G_M = np.dot(e_M,Gdist)
# G distortion
Gperp = Gdist-G_Y*e_Y-G_M*e_M
e_G = Gperp/vector_norm(Gperp)
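# Sanity-check sketch (illustrative, never called; the tolerance is an
# arbitrary choice): after the Gram-Schmidt construction above, e_Y, e_M
# and e_G should form an orthonormal triple.
def _check_orthonormal(vectors=(e_Y, e_M, e_G), tol=1e-10):
    for a in range(len(vectors)):
        for b in range(len(vectors)):
            expected = 1.0 if a == b else 0.0
            assert abs(np.dot(vectors[a], vectors[b]) - expected) < tol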
f_g = np.zeros(Nz_arr)
f_mu = np.zeros(Nz_arr)
f_y = np.zeros(Nz_arr)
# Now, factorize G into orthonormal subspace
for index_z in range(Nz_arr):
# Compute non-normalized components
f_g[index_z] = (np.dot(G_th[:,index_z],e_G))/vector_norm(Gperp)
f_mu[index_z] = (np.dot(G_th[:,index_z],e_M)-G_M*f_g[index_z])/vector_norm(Mperp)
f_y[index_z] = (np.dot(G_th[:,index_z],e_Y)-M_Y*f_mu[index_z]-G_Y*f_g[index_z])/vector_norm(Ydist)
# Now we can re-normalize our functions and add the shift
J_g = 4.*f_g
J_mu = f_mu/1.401
J_y = 4.*f_y
# Calculate non-normalized residual
Residual = np.zeros((Nx_arr,Nz_arr))
for index_x in range(Nx_arr):
for index_z in range(Nz_arr):
Residual[index_x,index_z] = G_th[index_x,index_z]-Gdist[index_x]*f_g[index_z]-Ydist[index_x]*f_y[index_z]-Mdist[index_x]*f_mu[index_z]
# Calculate Fisher matrix
Fisher = np.zeros((Nz_arr,Nz_arr))
delta_ln_z = np.log(z_arr[1])-np.log(z_arr[0])
for index_za in range(Nz_arr):
for index_zb in range(Nz_arr):
if has_noisefile:
Fisher[index_za,index_zb] = np.sum(Residual[:,index_za]*Residual[:,index_zb]*pow(delta_ln_z/deltaIc_arr[:]*1.e8,2.))
else:
Fisher[index_za,index_zb] = np.sum(Residual[:,index_za]*Residual[:,index_zb]*pow(delta_ln_z/sd_detector_delta_Ic*1.e8,2.))
# Solve eigenvalue problem
eigvals,eigvecs = eigen_vals_vecs(Fisher)
eigvals = eigvals[::-1]
eigvecs = eigvecs[:,::-1]
E_vecs = np.real(eigvecs[:,:sd_PCA_size]).T
S_vecs = np.zeros((sd_PCA_size,Nx_arr))
for index_pca in range(sd_PCA_size):
for index_x in range(Nx_arr):
S_vecs[index_pca][index_x] = np.dot(E_vecs[index_pca],Residual[index_x,:]*delta_ln_z)
# Create output files
form = "%.6e" #Output formatting
# Write file for branching ratio (Evec)
with open(os.path.join(dir_path,sd_detector_name+"_branching_ratios.dat"),"w") as brfile:
brfile.write("# In the file there is: z, J_T, J_y, J_mu, E_i (i=1-{})\n".format(sd_PCA_size))
brfile.write("# The first line contains the number of lines and the number of columns.\n".format(sd_PCA_size))
brfile.write("{} {}\n".format(Nz_arr,sd_PCA_size))
for index_z in range(Nz_arr):
brfile.write((form+" ") % z_arr[index_z])
brfile.write((form+" ") % f_g[index_z])
brfile.write((form+" ") % f_y[index_z])
brfile.write((form ) % f_mu[index_z])
for index_pca in range(sd_PCA_size):
brfile.write((" "+form) % E_vecs[index_pca][index_z])
brfile.write("\n")
# Write file for distortion shapes (Svec)
with open(os.path.join(dir_path,sd_detector_name+"_distortions_shapes.dat"),"w") as dsfile:
dsfile.write("# In the file there is: nu, G_T, Y_SZ, M_mu, S_i (i=1-{})\n".format(sd_PCA_size))
dsfile.write("# The first line contains the number of lines and the number of columns.\n".format(sd_PCA_size))
dsfile.write("{} {}\n".format(Nx_arr,sd_PCA_size))
for index_x in range(Nx_arr):
dsfile.write((form+" ") % (x_arr[index_x]*x_to_nu))
dsfile.write((form+" ") % Gdist[index_x])
dsfile.write((form+" ") % Ydist[index_x])
dsfile.write((form ) % Mdist[index_x])
for index_pca in range(sd_PCA_size):
dsfile.write((" "+form) % S_vecs[index_pca][index_x])
dsfile.write("\n")
# Update list of detectors
# Open and read already present list
with open(os.path.join(dir_path,"detectors_list.dat"),"a") as detector_file:
if has_noisefile:
detector_file.write('%s %s\n' % (sd_detector_name, sd_noisefile_name))
else:
detector_file.write('%s %.6e %.6e %.6e %i %.6e\n' % (sd_detector_name, sd_detector_nu_min, sd_detector_nu_max, sd_detector_nu_delta, sd_detector_bin_number, sd_detector_delta_Ic))
| 10,312 | 36.638686 | 190 | py |
class_DMDR | class_DMDR-master/external/RealSpaceInterface/colormap_converter.py | import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import os
OUTPUT_DIR = os.path.join("static", "images", "colormaps")
WIDTH = 512
def create_image(cmap, width):
values = np.linspace(0, 1, width)
colors = cmap(values).reshape((1, width, 4))
image = Image.fromarray(np.uint8(255 * colors))
return image
cmap_names = {}
cmap_names['Uniform'] = [
'viridis', 'plasma', 'inferno', 'magma']
cmap_names['Diverging'] = [
'seismic', 'RdYlBu', 'Spectral'
]
cmap_names['Miscellaneous'] = ['jet']
if __name__ == "__main__":
for category in cmap_names:
category_dir = os.path.join(OUTPUT_DIR, category)
if not os.path.exists(category_dir):
os.mkdir(category_dir)
for name in cmap_names[category]:
result = create_image(plt.get_cmap(name), width=WIDTH)
output_path = os.path.join(category_dir, "{}.png".format(name))
print(output_path)
result.save(output_path)
| 1,042 | 28.8 | 75 | py |
class_DMDR | class_DMDR-master/external/RealSpaceInterface/config.py | import os
# Default port number to listen on. Can be overriden by passing a port number
# as the first command line argument, e.g. `python tornadoserver.py 1234`
PORT = 7777
# Directory to store previously computed transfer functions, spectra etc. in
DATABASE_DIR = "cache"
# Maximum number of thread pool workers (only required for multi-user usage)
MAX_THREADPOOL_WORKERS = 8
# Path of colormap directory relative to the static directory from which
# tornado serves static files
COLORMAP_PATH = os.path.join("images", "colormaps")
# number of sample points for the transfer function that is displayed
# in the client
TRANSFER_FUNCTION_CLIENT_SAMPLES = 400
# number of sample points for the matter spectrum that is displayed
# in the client per decade
MATTER_SPECTRUM_CLIENT_SAMPLES_PER_DECADE = 40
| 806 | 32.625 | 77 | py |
class_DMDR | class_DMDR-master/external/RealSpaceInterface/tornadoserver.py | from Calc2D.CalculationClass import Calculation
import time
import numpy as np
from concurrent.futures import ThreadPoolExecutor
from tornado.ioloop import IOLoop
from tornado import gen
import tornado.web
import tornado.websocket
import os
import os.path
import json
import unicodedata
import logging
import base64
import traceback
import sys
import config
pool = ThreadPoolExecutor(max_workers=config.MAX_THREADPOOL_WORKERS)
def generate_redshifts(redshift_config):
logging.info(redshift_config)
arrs = []
for conf in redshift_config:
log = conf["log"]
func = np.logspace if log else np.linspace
start = np.log10(conf["from"]) if log else conf["from"]
stop = np.log10(conf["to"]) if log else conf["to"]
arrs.append(func(start, stop, conf["points"]))
    # Remove duplicates and return in descending order (highest redshift first)
    return np.flip(np.unique(np.concatenate(arrs)), axis=0)
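# Example sketch (hypothetical configuration):
#   generate_redshifts([{"from": 1e4, "to": 10, "points": 4, "log": True}])
# returns array([10000., 1000., 100., 10.]) -- unique values, highest
# redshift first.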
# Load available colormaps
def get_colormaps(path=config.COLORMAP_PATH):
categories = []
maps = []
order = {'Default': 1, 'Uniform': 2, 'Diverging': 3, 'Miscellaneous': 4}
cmap_directories = list(sorted(
os.listdir(os.path.join("static", path)),
key=lambda d: order[d]
))
for directory in cmap_directories:
categories.append(directory)
maps_for_category = []
for cmap in os.listdir(os.path.join("static", path, directory)):
maps_for_category.append({
'label': cmap[:cmap.rfind(".")],
'src': os.path.join(os.path.join(config.COLORMAP_PATH, directory, cmap)),
})
maps.append(maps_for_category)
return categories, maps
class SimulationHandler(tornado.web.RequestHandler):
def get(self):
categories, colormaps = get_colormaps()
self.render('RSI.html', categories=categories, colormaps=colormaps)
class DataConnection(tornado.websocket.WebSocketHandler):
def open(self):
logging.info("Client connected!")
self.calc = Calculation(kbins=config.TRANSFER_FUNCTION_CLIENT_SAMPLES)
# Send list of `k` values only once
logging.info("Sending k range to client");
self.write_message(json.dumps({
"type": "krange",
"k": self.calc.krange.tolist()
}))
def on_close(self):
logging.info("Connection was closed")
@gen.coroutine
def on_message(self, message):
message = json.loads(message)
param_type = message['type']
logging.debug("Received message from client: {}".format(message))
params = message['params']
if param_type == "Initial":
initialDataType = str(params['initialDataType'])
size = params["xScale"]
resolution = int(params["resolution"])
self.calc.resolution = resolution
self.calc.size = size
logging.info("Size: {} x {} Mpc^2, resolution: {} x {}".format(size, size, resolution, resolution))
SIlimit = params['SILimit']
if SIlimit == "None":
SIlimit = None
sigma = float(params['sigma'])
SI_ns = params['n_s']
if initialDataType == "SI":
A_s = 2.214 * 10**(-9)
else:
A_s = 1
redshift = generate_redshifts(params["redshift"])
self.calc.redshift = redshift
self.write_message(
json.dumps({
'type': 'redshift',
'redshift': redshift.tolist()
}))
logging.info("Submitting initial state generation to ThreadPoolExecutor")
yield pool.submit(self.set_initial_condition, sigma, initialDataType,
SIlimit, SI_ns, A_s)
self.send_initial_state()
self.write_message(json.dumps({'type': 'success', 'sort': 'Initial'}))
elif param_type == "Cosmo":
logging.info("Received cosmological parameters")
cosmological_parameters = params
logging.info("Submitting calculation to ThreadPoolExecutor")
messages = yield pool.submit(self.set_cosmological_parameters, cosmological_parameters)
for message in messages:
self.write_message(json.dumps(message))
elif param_type == "Start":
logging.info("Starting propagation...")
try:
for redindex, z in enumerate(self.calc.redshift):
self.send_frame(redindex)
self.write_message(json.dumps({'type': 'success', 'sort': 'Data'}))
except Exception as e:
logging.exception(e)
self.send_exception(e)
def send_frame(self, redindex):
# `extrema`: (minimum, maximum) of (real space) data
Valuenew, FValuenew, extrema = self.calc.getData(redindex)
logging.info("Sending data for redshift = {}".format(self.calc.redshift[redindex]))
# Create data to be displayed in transfer function window
TransferData, _ = self.calc.getTransferData(redindex)
self.write_message(json.dumps({'type': 'extrema', 'extrema': extrema}))
progress = float(redindex) / len(self.calc.redshift)
        real = {quantity: base64.b64encode(data.astype(np.float32)) for quantity, data in Valuenew.items()}
        transfer = {quantity: base64.b64encode(data.astype(np.float32)) for quantity, data in TransferData.items()}
self.write_message(
json.dumps({
'type': 'data',
'progress': progress,
'real': real,
'fourier': [],
'transfer': transfer,
}))
def send_initial_state(self):
Value, FValue, extrema = self.calc.getInitialData()
TransferData = np.ones(config.TRANSFER_FUNCTION_CLIENT_SAMPLES)
krange = np.zeros(config.TRANSFER_FUNCTION_CLIENT_SAMPLES)
logging.info("Sending initial data to client.")
self.write_message({
"type": "resolution",
"value": self.calc.resolution
})
extremastring = json.dumps({'type': 'extrema', 'extrema': extrema})
datastring = json.dumps({
'type': 'data',
'real': base64.b64encode(Value.astype(np.float32)),
'fourier': [],
'transfer': base64.b64encode(TransferData.astype(np.float32)),
'k': krange.tolist()
})
self.write_message(extremastring)
self.write_message(datastring)
def set_initial_condition(self, sigma, initialDataType, SIlimit, SI_ns, A_s):
try:
self.calc.setInitialConditions(
sigma=sigma,
initialDataType=initialDataType,
SIlimit=SIlimit,
SI_ns=SI_ns,
A=A_s
)
except Exception as e:
logging.exception(e)
self.send_exception(e)
def send_exception(self, e):
self.write_message(json.dumps({'type': 'exception', 'exception': traceback.format_exc()}))
def set_cosmological_parameters(self, cosmologicalParameters):
try:
messages = []
logging.info("Starting calculation...")
self.calc.setCosmologialParameters(cosmologicalParameters=cosmologicalParameters)
logging.info("Finished calculation!")
messages.append({'type': 'success', 'sort': 'Cosmo'})
messages.append({
'type': 'Cl',
'l': self.calc.tCl.l.tolist(),
'tCl': self.calc.tCl.tCl.tolist()
})
messages.append({
'type': 'mPk',
'kh': self.calc.mPk.kh.tolist(),
'Pkh': self.calc.mPk.Pkh.tolist()
})
z_of_decoupling = self.calc.z_dec
frame_of_decoupling = np.argmin(np.abs(z_of_decoupling - self.calc.redshift))
if self.calc.redshift[frame_of_decoupling] > z_of_decoupling:
frame_of_decoupling -= 1
messages.append({
'type': 'decoupling',
'frame': frame_of_decoupling,
'z': z_of_decoupling})
except Exception as e:
logging.exception(e)
self.send_exception(e)
else:
return messages
def main():
logging.getLogger().setLevel(logging.DEBUG)
application = tornado.web.Application(
[
(r"/", SimulationHandler),
(r"/datasocket", DataConnection),
],
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
debug=True,
)
PORT = config.PORT if len(sys.argv) == 1 else int(sys.argv[1])
application.listen(PORT)
logging.info("Application launched on http://localhost:{}".format(PORT))
    IOLoop.current().start()
if __name__ == '__main__':
main()
| 9,009 | 35.184739 | 119 | py |
class_DMDR | class_DMDR-master/external/RealSpaceInterface/Calc2D/TransferFunction.py | import os.path
import pickle
import uuid
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline, RectBivariateSpline
import sys
import logging
from classy import Class
import Calc2D.Database as Database
import config
TRANSFER_QUANTITIES = ["d_g", "d_ur", "d_cdm", "d_b", "d_g/4 + psi"]
def ComputeTransferData(settings, redshift):
database_key = settings.copy()
database_key.update({'redshift': tuple(redshift)})
database = Database.Database(config.DATABASE_DIR)
if database_key in database:
return database[database_key], redshift
else:
cosmo = Class()
cosmo.set(settings)
cosmo.compute()
outputData = [cosmo.get_transfer(z) for z in redshift]
# Calculate d_g/4+psi
for transfer_function_dict in outputData:
transfer_function_dict["d_g/4 + psi"] = transfer_function_dict["d_g"]/4 + transfer_function_dict["psi"]
# Now filter the relevant fields
fields = TRANSFER_QUANTITIES + ["k (h/Mpc)"]
outputData = [{field: outputData[i][field] for field in fields} for i in range(len(redshift))]
database[database_key] = outputData
return outputData, redshift
def ComputeTransferFunctionList(cosmologicalParameters, redshift, kperdecade=200, P_k_max=100):
class_settings = cosmologicalParameters.copy()
class_settings.update({
"output": "mTk",
"gauge": "newtonian",
"evolver": "1",
"P_k_max_h/Mpc": P_k_max,
"k_per_decade_for_pk": kperdecade,
"z_max_pk": str(max(redshift)),
})
data_dict, redshift = ComputeTransferData(class_settings, redshift)
transfer_functions = {field: [] for field in TRANSFER_QUANTITIES}
for i in range(len(redshift)):
        k_data = data_dict[0]["k (h/Mpc)"] * cosmologicalParameters["h"]  # convert to k [1/Mpc]; the k grid is the same at every redshift
k_data_zero = np.concatenate(([0.0], k_data))
for field in TRANSFER_QUANTITIES:
data = data_dict[i][field] / data_dict[i][field][0]
data_zero = np.concatenate(([1.0], data))
interpolated_func = InterpolatedUnivariateSpline(k_data_zero, data_zero)
transfer_functions[field].append(interpolated_func)
return transfer_functions
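# Usage sketch (the parameter values below are illustrative assumptions, not
# defaults of this module):
#
#   params = {"h": 0.6781, "omega_b": 0.02238, "omega_cdm": 0.12011}
#   redshift = np.logspace(4, 0, 50)          # from z = 10^4 down to z = 1
#   tfs = ComputeTransferFunctionList(params, redshift)
#   # tfs["d_g"][0] is a spline in k [1/Mpc] at redshift[0]:
#   values = tfs["d_g"][0](np.logspace(-4, 0, 100))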
| 2,263 | 33.830769 | 115 | py |
class_DMDR | class_DMDR-master/external/RealSpaceInterface/Calc2D/DataGeneration.py | import logging
import numpy as np
import cv2
from Calc2D.rFourier import realFourier, realInverseFourier
def GenerateGaussianData(sigma, size, points, A=1):
xr = np.linspace(-size / 2.0, size / 2.0, points)
yr = np.linspace(-size / 2.0, size / 2.0, points)
step = xr[1] - xr[0]
x, y = np.meshgrid(
xr, yr, indexing='ij', sparse=True) # indexing is important
del xr, yr
    # use the simpler closed-form expression for an isotropic 2-d Gaussian
Value = A * np.exp(-(x**2 + y**2) / (2 * sigma**2))
kx, ky, FValue = realFourier(step, Value)
kxr, kyr = np.meshgrid(kx, ky, indexing='ij', sparse=True)
k = np.sqrt(kxr**2 + kyr**2)
del kxr, kyr
kx = (min(kx), max(kx)) #just return the extremal values to save memory
ky = (min(ky), max(ky))
ValueE = (Value.min(), Value.max())
return ValueE, FValue, k, kx, ky
def GenerateSIData(A, size, points, limit=None, ns=0.96):
xr = np.linspace(-size / 2.0, size / 2.0, points)
yr = np.linspace(-size / 2.0, size / 2.0, points)
step = xr[1] - xr[0]
x, y = np.meshgrid(
xr, yr, indexing='ij', sparse=True) # indexing is important
del xr, yr
Value = 0 * x + 0 * y
kx, ky, FValue = realFourier(step, Value) #FValue==0
kxr, kyr = np.meshgrid(kx, ky, indexing='ij', sparse=True)
k = np.sqrt(kxr**2 + kyr**2)
del kxr, kyr
    if limit is None:
ktilde = k.flatten()
        ktilde[np.argmin(k)] = 10**9  # make the k=0 (background) mode arbitrarily low in power
ktilde = ktilde.reshape(k.shape)
FValue = np.random.normal(
loc=0,
scale=np.sqrt(A / ktilde**(
2 - (ns - 1) * 2. / 3.)) / np.sqrt(2)) + np.random.normal(
loc=0,
scale=np.sqrt(A / ktilde**
(2 - (ns - 1) * 2. / 3.)) / np.sqrt(2)) * 1j
    elif isinstance(limit, (list, tuple)):
iunder, junder = np.where(k < limit[1])
for t in range(len(iunder)):
if k[iunder[t]][junder[t]] > limit[0] and k[iunder[t]][junder[t]] > 0:
FValue[iunder[t]][junder[t]] = np.random.normal(
loc=0,
scale=np.sqrt(A / k[iunder[t]][junder[t]]**
(2 - (ns - 1) * 2. / 3.)) /
np.sqrt(2)) + np.random.normal(
loc=0,
scale=np.sqrt(A / k[iunder[t]][junder[t]]**
(2 -
(ns - 1) * 2. / 3.)) / np.sqrt(2)) * 1j
else:
raise ValueError("limit must be None or tuple or list")
Value = realInverseFourier(FValue)
kx = (min(kx), max(kx))
ky = (min(ky), max(ky))
ValueE = (Value.min(), Value.max())
return ValueE, FValue, k, kx, ky
| 2,802 | 29.467391 | 82 | py |
class_DMDR | class_DMDR-master/external/RealSpaceInterface/Calc2D/CalculationClass.py | import os
import logging
import cv2
import numpy as np
from classy import Class
from Calc2D.TransferFunction import ComputeTransferFunctionList
from Calc2D.DataGeneration import GenerateGaussianData, GenerateSIData
from Calc2D.DataPropagation import PropagateDatawithList
from Calc2D.rFourier import *
from Calc2D.Database import Database
from collections import namedtuple
import config
ClSpectrum = namedtuple("Cls", ["l", "tCl"])
PkSpectrum = namedtuple("Pkh", ["kh", "Pkh"])
def normalize(real):
"""
Given the `real` data, i.e. either a 2d array or a flattened 1d array
of the relative density perturbations, normalize its values as follows:
The client expects the values to be in the interval of [-1, 1].
Take a symmetric interval around a `real` value of 0 and linearly
map it to the required interval [-1, 1].
"""
minimum, maximum = real.min(), real.max()
bound = max(abs(maximum), abs(minimum))
result = real / bound
return result
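# Minimal illustration of the mapping above (numbers are made up):
#
#   normalize(np.array([-2.0, 0.0, 1.0]))  ->  array([-1. ,  0. ,  0.5])
#
# i.e. the value of largest magnitude is sent to -1 or +1, and 0 stays 0.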
class Calculation(object):
def __init__(self,
kbins,
resolution = 200,
gauge="newtonian",
kperdecade=200,
P_k_max=100,
evolver=1):
# also sets `endshape` through setter
self.resolution = resolution
self.gauge = gauge
self.evolver = evolver
self.P_k_max = P_k_max
self.kperdecade = kperdecade
self.redshift = None # to be set later
        self.size = None # to be set later
        self.z_rec = None # set by calculate_spectra()
self.krange = np.logspace(-4, 1, kbins)
@property
def resolution(self):
return self._resolution
@resolution.setter
def resolution(self, resolution):
self._resolution = resolution
self.endshape = (resolution, resolution)
def getData(self, redshiftindex):
FValuenew = PropagateDatawithList(
k=self.k,
FValue=self.FValue,
zredindex=redshiftindex,
transferFunctionlist=self.TransferFunctionList)
Valuenew = dict()
FValue_abs = np.abs(self.FValue)
_min, _max = FValue_abs.min(), FValue_abs.max()
        dimensions = (self.endshape[0] // 2, self.endshape[1])  # integer division: cv2.resize needs ints
for quantity, FT in FValuenew.items():
FT_abs = np.abs(FT)
FT_normalized = cv2.resize(FT_abs, dimensions).ravel()
FT_normalized = (FT_normalized - _min) / (_max - _min)
real = realInverseFourier(FT.reshape(self.FValue.shape))
# real = cv2.resize(real, self.endshape).ravel()
real = real.ravel()
minimum, maximum = real.min(), real.max()
Valuenew[quantity] = normalize(real)
return Valuenew, FValuenew, (minimum, maximum)
def getInitialData(self):
# for odd values of self._resolution, this is necessary
Value = cv2.resize(realInverseFourier(self.FValue), self.endshape)
minimum, maximum = Value.min(), Value.max()
Value = normalize(Value)
assert Value.size == self.resolution ** 2
return Value.ravel(), cv2.resize(
(np.abs(self.FValue) - np.abs(self.FValue).min()) /
(np.abs(self.FValue).max() - np.abs(self.FValue).min()),
            (self.endshape[0] // 2, self.endshape[1])).ravel(), (minimum,
maximum)
def getTransferData(self, redshiftindex):
return {field: transfer_function[redshiftindex](self.krange) for field, transfer_function in self.TransferFunctionList.items()}, self.krange
def setCosmologialParameters(self, cosmologicalParameters):
self.cosmologicalParameters = cosmologicalParameters
# Calculate transfer functions
self.TransferFunctionList = ComputeTransferFunctionList(self.cosmologicalParameters, self.redshift)
# Calculate Cl's
self.tCl, self.mPk = self.calculate_spectra(self.cosmologicalParameters)
def calculate_spectra(self, cosmo_params, force_recalc=False):
settings = cosmo_params.copy()
settings.update({
"output": "tCl,mPk",
"evolver": "1",
"gauge": "newtonian",
"P_k_max_1/Mpc": 10,
})
database = Database(config.DATABASE_DIR, "spectra.dat")
if settings in database and not force_recalc:
data = database[settings]
ell = data["ell"]
tt = data["tt"]
kh = data["kh"]
Pkh = data["Pkh"]
self.z_rec = data["z_rec"]
else:
cosmo = Class()
cosmo.set(settings)
cosmo.compute()
# Cl's
data = cosmo.raw_cl()
ell = data["ell"]
tt = data["tt"]
# Matter spectrum
k = np.logspace(-3, 1, config.MATTER_SPECTRUM_CLIENT_SAMPLES_PER_DECADE * 4)
Pk = np.vectorize(cosmo.pk)(k, 0)
kh = k * cosmo.h()
Pkh = Pk / cosmo.h()**3
# Get redshift of decoupling
z_rec = cosmo.get_current_derived_parameters(['z_rec'])['z_rec']
self.z_rec = z_rec
# Store to database
database[settings] = {
"ell": data["ell"],
"tt": data["tt"],
"kh": k,
"Pkh": Pk,
"z_rec": z_rec,
}
return ClSpectrum(ell[2:], tt[2:]), PkSpectrum(kh, Pkh)
@property
def z_dec(self):
if self.z_rec is None:
raise ValueError("z_rec hasn't been computed yet")
return self.z_rec
def setInitialConditions(self,
A=1,
sigma=2,
initialDataType="SI",
SIlimit=None,
SI_ns=0.96):
logging.info("Generating Initial Condition")
if initialDataType == "Gaussian":
self.ValueE, self.FValue, self.k, self.kxE, self.kyE = GenerateGaussianData(
sigma, self.size, self.resolution)
elif initialDataType == "SI":
self.ValueE, self.FValue, self.k, self.kxE, self.kyE = GenerateSIData(
A,
self.size,
self.resolution,
limit=SIlimit,
ns=SI_ns)
else:
            logging.warning("initialDataType " + str(initialDataType) + " not found")
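# Usage sketch (illustrative values; `size` and `redshift` are normally set by
# the calling server code before these calls):
#
#   calc = Calculation(kbins=100)
#   calc.size = 200.0
#   calc.redshift = np.logspace(4, 0, 80)
#   calc.setInitialConditions(initialDataType="Gaussian", sigma=5.0)
#   calc.setCosmologialParameters({"h": 0.6781, "omega_b": 0.02238,
#                                  "omega_cdm": 0.12011})
#   fields, ffields, extrema = calc.getData(redshiftindex=0)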
| 6,414 | 33.12234 | 148 | py |
class_DMDR | class_DMDR-master/external/RealSpaceInterface/Calc2D/DataPropagation.py | import numpy as np
# uses one-dimensional interpolation
def PropagateDatawithListOld(k,FValue,zredindex,transferFunctionlist):
return (transferFunctionlist[zredindex](k.ravel()) * FValue.ravel()).reshape(FValue.shape)
def PropagateDatawithList(k, FValue, zredindex, transferFunctionlist):
result = {}
for field, transfer_function in transferFunctionlist.items():
result[field] = (transfer_function[zredindex](k.ravel()) * FValue.ravel()).reshape(FValue.shape)
return result
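# Shape contract (sketch): `k` and `FValue` are 2-d arrays of identical shape,
# and every entry of the returned dict keeps that shape, e.g.
#
#   fields = PropagateDatawithList(k, FValue, 0, transfer_functions)
#   fields["d_g"].shape == FValue.shape      # True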
# uses two-dimensional interpolation and propagates all redshifts at once (fastest, but high memory consumption)
def PropagateAllData(k,FValue,allzred,transferFunction):
allFValue = np.ones((len(allzred),FValue.shape[0],FValue.shape[1]),dtype=complex)
for kxindex in range(FValue.shape[0]):
allFValue[:,kxindex,:] = transferFunction(allzred,k[kxindex])*FValue[kxindex]
return allFValue
# uses two-dimensional interpolation for a single redshift (slowest, but useful if the set of redshifts changes often)
def PropagateData(k,FValue,zred,transferFunction):
FValuenew = np.ones(FValue.shape,dtype=complex)
for kxindex in range(FValue.shape[0]):
        FValuenew[kxindex,:] = transferFunction(zred,k[kxindex])*FValue[kxindex]
    return FValuenew
| 1,256 | 32.078947 | 117 | py |
class_DMDR | class_DMDR-master/external/RealSpaceInterface/Calc2D/Database.py | import pickle
import os
import logging
import uuid
class Database:
def __init__(self, directory, db_file="database.dat"):
self.directory = directory
self.db_file = db_file
if not os.path.isdir(directory):
raise ValueError("'{}' is not a directory!".format(directory))
self.db_path = os.path.join(directory, db_file)
if not os.path.exists(self.db_path):
logging.info("No database found; Creating one at {}.".format(self.db_path))
with open(self.db_path, "w") as f:
pickle.dump(dict(), f)
self.db = self.__read_database()
def __read_database(self):
        with open(self.db_path, "rb") as f:
return pickle.load(f)
def __write_database(self):
with open(self.db_path, "w") as f:
pickle.dump(self.db, f)
def __create_file(self, data):
filename = str(uuid.uuid4())
with open(os.path.join(self.directory, filename), "w") as f:
pickle.dump(data, f)
return filename
def __get_frozen_key(self, key):
return frozenset(key.items())
def __getitem__(self, key):
frozen_key = self.__get_frozen_key(key)
if frozen_key in self.db:
filename = self.db[frozen_key]
            with open(os.path.join(self.directory, filename), "rb") as f:
return pickle.load(f)
else:
raise KeyError("No data for key: {}".format(key))
def __setitem__(self, key, data):
frozen_key = self.__get_frozen_key(key)
self.db[frozen_key] = self.__create_file(data)
self.__write_database()
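    # Usage sketch (illustrative; the directory is assumed to exist):
    #
    #   db = Database("/tmp/classdb")
    #   key = {"output": "tCl", "h": 0.678}
    #   if key not in db:
    #       db[key] = {"ell": [2, 3], "tt": [1e-10, 2e-10]}
    #   cached = db[key]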
def __contains__(self, key):
"""
Return whether `self` contains a record
for the given `key`.
"""
return self.__get_frozen_key(key) in self.db | 1,820 | 30.396552 | 87 | py |
class_DMDR | class_DMDR-master/external/RealSpaceInterface/Calc2D/__init__.py | 0 | 0 | 0 | py |
|
class_DMDR | class_DMDR-master/external/RealSpaceInterface/Calc2D/rFourier.py | import numpy as np
import numpy.fft as fft
def realFourier(step, Value):
FValue = np.fft.fftshift(
np.fft.rfft2(Value), axes=(0)) #shifting only the x axes
kx = np.fft.fftshift(np.fft.fftfreq(Value.shape[0], d=step)) * 2 * np.pi
    ky = np.fft.rfftfreq(Value.shape[1], d=step) * 2 * np.pi  # second-axis length, correct for non-square input
return kx, ky, FValue
def realInverseFourier(FValue):
return np.fft.irfft2(np.fft.ifftshift(
FValue, axes=(0))) #shifting only on the x axes
def realInverseAllFourier(allFValue):
return np.fft.irfftn(
np.fft.ifftshift(allFValue, axes=(1)),
axes=(1, 2)) #shifting only on the x axes
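# Round-trip sketch: for real input with an even number of samples per axis,
# realInverseFourier undoes realFourier up to floating-point error:
#
#   V = np.random.rand(64, 64)
#   kx, ky, F = realFourier(0.1, V)
#   assert np.allclose(realInverseFourier(F), V)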
| 633 | 27.818182 | 76 | py |
class_DMDR | class_DMDR-master/python/test_class.py | """
.. module:: test_class
:synopsis: python script for testing CLASS using nose
.. moduleauthor:: Benjamin Audren <benjamin.audren@gmail.com>
.. credits:: Benjamin Audren, Thomas Tram
.. version:: 1.0
This is a python script for testing CLASS and its wrapper Classy using nose.
To run the test suite, type
nosetests test_class.py
If you want to extract the problematic input parameters at a later stage,
you should type
nosetests test_class.py 1>stdoutfile 2>stderrfile
and then use the python script extract_errors.py on the stderrfile.
When adding a new input parameter to CLASS (by modifying input.c), you
should also include tests of this new input. You will be in one of
two cases:
1: The new input is supposed to be compatible with any existing input.
This is the standard case when adding a new species for instance.
2: The new input is incompatible with one of the existing inputs. This
would be the case if you have added (or just want to test) some other
value of an already defined parameter. (Maybe you have allowed for
negative mass neutrinos and you want to test CLASS using a negative mass.)
In case 1, you must add an entry in the CLASS_INPUT dictionary:
CLASS_INPUT['Mnu'] = (
[{'N_eff': 0.0, 'N_ncdm': 1, 'm_ncdm': 0.06, 'deg_ncdm': 3.0},
{'N_eff': 1.5, 'N_ncdm': 1, 'm_ncdm': 0.03, 'deg_ncdm': 1.5}],
'normal')
The key 'Mnu' is not being used in the code, so its purpose is just to
describe the entry to the reader.
the value is a 2-tuple where the first entry [{},{},...,{}] is an array of
dictionaries containing the actual input to CLASS. The second entry is a keyword
which can be either 'normal' or 'power'. It tells the script how this input
will be combined with other inputs.
What do 'normal' and 'power' mean?
If an entry has the 'power' keyword, it will be combined with any other entry.
If an entry has the 'normal' keyword, it will not be combined with any other
entry having the 'normal' keyword, but it will be combined with all entries
carrying the 'power' keyword.
Beware that the number of tests grow a lot when using the 'power' keyword.
In case 2, you should find the relevant entry and just add a new dictionary
to the array. E.g. if you want to test some negative mass model you should add
{'N_ncdm': 1, 'm_ncdm': -0.1, 'deg_ncdm': 1.0}
How are default parameters handled?
Any input array implicitly contains the empty dictionary. That means that if
Omega_k:0.0 is the default value, writing
CLASS_INPUT['Curvature'] = (
[{'Omega_k': 0.01},
{'Omega_k': -0.01}],
'normal')
will test the default value Omega_k=0.0 along with the two specified models.
How to deal with inconsistent input?
Sometimes a specific feature requires the presence of another input parameter.
For instance, if we ask for tensor modes we must have temperature and/or
polarisation in the output. If not, CLASS is supposed to fail during the
evaluation of the input module and return an error message. This fail is the
correct behaviour of CLASS. To implement such a case, modify the function
has_incompatible_input(self)
Comparing output: When the flag 'COMPARE_OUTPUT_GAUGE' is set to true, the code will
rerun CLASS for each case under Newtonian gauge and then compare Cl's and
matter power spectrum. If the two are not close enough, it will generate a
PDF plot of this and save it in the 'fail' folder.
"""
from __future__ import print_function
from six import iteritems
#from future.utils import iteritems
import matplotlib as mpl
mpl.use('Agg')
import itertools
import matplotlib.pyplot as plt
import numpy as np
import os
import shutil
import unittest
from classy import Class
from classy import CosmoSevereError
from math import log10
from matplotlib.offsetbox import AnchoredText
from nose.plugins.attrib import attr
from parameterized import parameterized
# Customise test by reading environment variables
CLASS_VERBOSE = bool(int(os.getenv('CLASS_VERBOSE', '0'))) # Print output from CLASS?
COMPARE_OUTPUT_GAUGE = bool(int(os.getenv('COMPARE_OUTPUT_GAUGE', '0'))) # Compare synchronous and Newtonian gauge outputs?
COMPARE_OUTPUT_REF = bool(int(os.getenv('COMPARE_OUTPUT_REF', '0'))) # Compare classy with classyref?
POWER_ALL = bool(int(os.getenv('POWER_ALL', '0'))) # Combine every extension with each other? (Very slow!)
TEST_LEVEL = int(os.getenv('TEST_LEVEL', '0')) # 0 <= TEST_LEVEL <= 3
if COMPARE_OUTPUT_REF:
try:
import classyref
    except ImportError:
COMPARE_OUTPUT_REF = False
# Define bounds on the relative and absolute errors of C(l) and P(k)
# between reference, Newtonian and Synchronous gauge
COMPARE_CL_RELATIVE_ERROR = 3e-3
COMPARE_CL_RELATIVE_ERROR_GAUGE = 5*3e-3
COMPARE_CL_ABSOLUTE_ERROR = 1e-20
COMPARE_PK_RELATIVE_ERROR = 1e-2
COMPARE_PK_RELATIVE_ERROR_GAUGE = 5*1e-2
COMPARE_PK_ABSOLUTE_ERROR = 1e-20
# Dictionary of models to test the wrapper against. Each of these scenario will
# be run against all the possible output choices (nothing, tCl, mPk, etc...),
# with or without non-linearities.
# Never input the default value, as this will be **automatically** tested
# against. Indeed, when not specifying a field, CLASS takes the default input.
CLASS_INPUT = {}
CLASS_INPUT['Output_spectra'] = (
[{'output': 'mPk', 'P_k_max_1/Mpc': 2},
{'output': 'tCl'},
{'output': 'tCl pCl lCl'},
{'output': 'mPk tCl lCl', 'P_k_max_1/Mpc': 2},
{'output': 'nCl sCl'},
{'output': 'tCl pCl nCl sCl'}],
'power')
CLASS_INPUT['Nonlinear'] = (
[{'non linear': 'halofit'}],
'power')
CLASS_INPUT['Lensing'] = (
[{'lensing': 'yes'}],
'power')
if TEST_LEVEL > 0:
CLASS_INPUT['Mnu'] = (
[{'N_ur': 0.0, 'N_ncdm': 1, 'm_ncdm': 0.06, 'deg_ncdm': 3.0},
{'N_ur': 1.5, 'N_ncdm': 1, 'm_ncdm': 0.03, 'deg_ncdm': 1.5}],
'normal')
if TEST_LEVEL > 1:
CLASS_INPUT['Curvature'] = (
[{'Omega_k': 0.01},
{'Omega_k': -0.01}],
'normal')
CLASS_INPUT['modes'] = (
[{'modes': 't'},
{'modes': 's, t'}],
'power')
CLASS_INPUT['Tensor_method'] = (
[{'tensor method': 'exact'},
{'tensor method': 'photons'}],
'power')
if TEST_LEVEL > 2:
CLASS_INPUT['Isocurvature_modes'] = (
[{'ic': 'ad,nid,cdi', 'c_ad_cdi': -0.5}],
'normal')
CLASS_INPUT['Scalar_field'] = (
[{'Omega_scf': 0.1, 'attractor_ic_scf': 'yes',
'scf_parameters': '10, 0, 0, 0'}],
'normal')
CLASS_INPUT['Inflation'] = (
[{'P_k_ini type': 'inflation_V'},
{'P_k_ini type': 'inflation_H'}],
'normal')
# CLASS_INPUT['Inflation'] = (
# [{'P_k_ini type': 'inflation_V'},
# {'P_k_ini type': 'inflation_H'},
# {'P_k_ini type': 'inflation_V_end'}],
# 'normal')
if POWER_ALL:
for k, v in iteritems(CLASS_INPUT):
models, state = v
CLASS_INPUT[k] = (models, 'power')
INPUTPOWER = []
INPUTNORMAL = [{}]
for key, value in list(CLASS_INPUT.items()):
models, state = value
if state == 'power':
INPUTPOWER.append([{}]+models)
else:
INPUTNORMAL.extend(models)
PRODPOWER = list(itertools.product(*INPUTPOWER))
DICTARRAY = []
for normelem in INPUTNORMAL:
for powelem in PRODPOWER: # itertools.product(*modpower):
temp_dict = normelem.copy()
for elem in powelem:
temp_dict.update(elem)
DICTARRAY.append(temp_dict)
TUPLE_ARRAY = []
for e in DICTARRAY:
TUPLE_ARRAY.append((e, ))
def powerset(iterable):
xs = list(iterable)
# note we return an iterator rather than a list
return itertools.chain.from_iterable(
itertools.combinations(xs, n) for n in range(1, len(xs)+1))
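# For example: list(powerset([1, 2])) == [(1,), (2,), (1, 2)]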
def custom_name_func(testcase_func, param_num, param):
special_keys = ['N_ncdm']
somekeys = []
for key in param.args[0].keys():
if key in special_keys:
somekeys.append(key)
elif 'mega' in key:
somekeys.append(key)
res = '{}_{:04d}_{}'.format(
testcase_func.__name__,
param_num,
parameterized.to_safe_name('_'.join(somekeys))
)
return res.strip('_')
class TestClass(unittest.TestCase):
"""
Testing Class and its wrapper classy on different cosmologies
To run it, do
~] nosetest test_class.py
It will run many times Class, on different cosmological scenarios, and
everytime testing for different output possibilities (none asked, only mPk,
etc..)
"""
@classmethod
def setUpClass(cls):
cls.faulty_figs_path = os.path.join(
os.path.sep.join(os.path.realpath(__file__).split(
os.path.sep)[:-1]),
'faulty_figs')
if os.path.isdir(cls.faulty_figs_path):
shutil.rmtree(cls.faulty_figs_path)
os.mkdir(cls.faulty_figs_path)
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
"""
set up data used in the tests.
setUp is called before each test function execution.
"""
self.cosmo = Class()
self.cosmo_newt = Class()
if CLASS_VERBOSE:
self.verbose = {
'input_verbose': 1,
'background_verbose': 1,
'thermodynamics_verbose': 1,
'perturbations_verbose': 1,
'transfer_verbose': 1,
'primordial_verbose': 1,
'harmonic_verbose': 1,
'fourier_verbose': 1,
'lensing_verbose': 1,
'distortions_verbose': 1,
'output_verbose': 1,
}
else:
self.verbose = {}
self.scenario = {}
def tearDown(self):
self.cosmo.struct_cleanup()
self.cosmo.empty()
self.cosmo = 0
self.cosmo_newt.struct_cleanup()
self.cosmo_newt.empty()
self.cosmo_newt = 0
del self.scenario
def poormansname(self, somedict):
string = "_".join(
[k+'='+str(v)
for k, v in list(somedict.items())])
string = string.replace('/', '%')
string = string.replace(',', '')
string = string.replace(' ', '')
return string
@parameterized.expand(TUPLE_ARRAY, doc_func=custom_name_func, custom_name_func=custom_name_func)
@attr('dump_ini_files')
def test_Valgrind(self, inputdict):
"""Dump files"""
self.scenario.update(inputdict)
self.name = self._testMethodName
if self.has_incompatible_input():
return
path = os.path.join(self.faulty_figs_path, self.name)
self.store_ini_file(path)
self.scenario.update({'gauge':'Newtonian'})
self.store_ini_file(path + 'N')
@parameterized.expand(TUPLE_ARRAY, doc_func=custom_name_func, custom_name_func=custom_name_func)
@attr('test_scenario')
def test_scenario(self, inputdict):
"""Test scenario"""
self.scenario.update(inputdict)
self.name = self._testMethodName
self.cosmo.set(dict(itertools.chain(self.verbose.items(), self.scenario.items())))
cl_dict = {
'tCl': ['tt'],
'lCl': ['pp'],
'pCl': ['ee', 'bb'],
'nCl': ['dd'],
'sCl': ['ll'],
}
# 'lensing' is always set to yes. Therefore, trying to compute 'tCl' or
# 'pCl' will fail except if we also ask for 'lCl'.
if self.has_incompatible_input():
self.assertRaises(CosmoSevereError, self.cosmo.compute)
return
else:
self.cosmo.compute()
self.assertTrue(
self.cosmo.state,
"Class failed to go through all __init__ methods")
        # Depending on the requested output, run positive and negative checks
if 'output' in self.scenario.keys():
# Positive tests of raw cls
output = self.scenario['output']
for elem in output.split():
if elem in cl_dict.keys():
for cl_type in cl_dict[elem]:
is_density_cl = (elem == 'nCl' or elem == 'sCl')
if is_density_cl:
cl = self.cosmo.density_cl(100)
else:
cl = self.cosmo.raw_cl(100)
self.assertIsNotNone(cl, "raw_cl returned nothing")
cl_length = np.shape(cl[cl_type][0])[0] if is_density_cl else np.shape(cl[cl_type])[0]
self.assertEqual(cl_length, 101, "raw_cl returned wrong size")
if elem == 'mPk':
pk = self.cosmo.pk(0.1, 0)
self.assertIsNotNone(pk, "pk returned nothing")
# Negative tests of output functions
if not any([elem in list(cl_dict.keys()) for elem in output.split()]):
# testing absence of any Cl
self.assertRaises(CosmoSevereError, self.cosmo.raw_cl, 100)
if 'mPk' not in output.split():
# testing absence of mPk
self.assertRaises(CosmoSevereError, self.cosmo.pk, 0.1, 0)
if COMPARE_OUTPUT_REF or COMPARE_OUTPUT_GAUGE:
# Now compute same scenario in Newtonian gauge
self.cosmo_newt.set(
dict(list(self.verbose.items())+list(self.scenario.items())))
self.cosmo_newt.set({'gauge': 'newtonian'})
self.cosmo_newt.compute()
if COMPARE_OUTPUT_GAUGE:
# Compare synchronous and Newtonian gauge
self.assertTrue(
self.cosmo_newt.state,
"Class failed to go through all __init__ methods in Newtonian gauge")
self.compare_output(self.cosmo, "Synchronous", self.cosmo_newt, 'Newtonian', COMPARE_CL_RELATIVE_ERROR_GAUGE, COMPARE_PK_RELATIVE_ERROR_GAUGE)
if COMPARE_OUTPUT_REF:
# Compute reference models in both gauges and compare
cosmo_ref = classyref.Class()
cosmo_ref.set(self.cosmo.pars)
cosmo_ref.compute()
status = self.compare_output(cosmo_ref, "Reference", self.cosmo, 'Synchronous', COMPARE_CL_RELATIVE_ERROR, COMPARE_PK_RELATIVE_ERROR)
assert status, 'Reference comparison failed in Synchronous gauge!'
cosmo_ref = classyref.Class()
cosmo_ref.set(self.cosmo_newt.pars)
cosmo_ref.compute()
self.compare_output(cosmo_ref, "Reference", self.cosmo_newt, 'Newtonian', COMPARE_CL_RELATIVE_ERROR, COMPARE_PK_RELATIVE_ERROR)
assert status, 'Reference comparison failed in Newtonian gauge!'
def has_incompatible_input(self):
should_fail = False
# If we have tensor modes, we must have one tensor observable,
# either tCl or pCl.
if has_tensor(self.scenario):
if 'output' not in list(self.scenario.keys()):
should_fail = True
else:
output = self.scenario['output'].split()
if 'tCl' not in output and 'pCl' not in output:
should_fail = True
# If we have specified lensing, we must have lCl in output,
# otherwise lensing will not be read (which is an error).
if 'lensing' in list(self.scenario.keys()):
if 'output' not in list(self.scenario.keys()):
should_fail = True
else:
output = self.scenario['output'].split()
if 'lCl' not in output:
should_fail = True
elif 'tCl' not in output and 'pCl' not in output:
should_fail = True
# If we have specified a tensor method, we must have tensors.
if 'tensor method' in list(self.scenario.keys()):
if not has_tensor(self.scenario):
should_fail = True
# If we have specified non linear, we must have some form of
# perturbations output.
if 'non linear' in list(self.scenario.keys()):
if 'output' not in list(self.scenario.keys()):
should_fail = True
# If we ask for Cl's of lensing potential, number counts or cosmic shear, we must have scalar modes.
# The same applies to density and velocity transfer functions and the matter power spectrum:
if 'output' in self.scenario and 'modes' in self.scenario and self.scenario['modes'].find('s') == -1:
requested_output_types = set(self.scenario['output'].split())
for scalar_output_type in ['lCl', 'nCl', 'dCl', 'sCl', 'mPk', 'dTk', 'mTk', 'vTk']:
if scalar_output_type in requested_output_types:
should_fail = True
break
# If we specify initial conditions (for scalar modes), we must have
# perturbations and scalar modes.
if 'ic' in list(self.scenario.keys()):
if 'modes' in list(self.scenario.keys()) and self.scenario['modes'].find('s') == -1:
should_fail = True
if 'output' not in list(self.scenario.keys()):
should_fail = True
# If we use inflation module, we must have scalar modes,
# tensor modes, no vector modes and we should only have adiabatic IC:
if 'P_k_ini type' in list(self.scenario.keys()) and self.scenario['P_k_ini type'].find('inflation') != -1:
if 'modes' not in list(self.scenario.keys()):
should_fail = True
else:
if self.scenario['modes'].find('s') == -1:
should_fail = True
if self.scenario['modes'].find('v') != -1:
should_fail = True
if self.scenario['modes'].find('t') == -1:
should_fail = True
if 'ic' in list(self.scenario.keys()) and self.scenario['ic'].find('i') != -1:
should_fail = True
return should_fail
def compare_output(self, reference, reference_name, candidate, candidate_name, rtol_cl, rtol_pk):
status_pass = True
for elem in ['raw_cl', 'lensed_cl']:
# Try to get the elem, but if they were not computed, a
# CosmoComputeError should be raised. In this case, ignore the
# whole block.
try:
to_test = getattr(candidate, elem)()
except CosmoSevereError:
continue
ref = getattr(reference, elem)()
for key, value in list(ref.items()):
if key != 'ell':
# For all self spectra, try to compare allclose
if key[0] == key[1]:
# If it is a 'dd' or 'll', it is a dictionary.
if isinstance(value, dict):
for subkey in list(value.keys()):
try:
np.testing.assert_allclose(
value[subkey],
to_test[key][subkey],
rtol=rtol_cl,
atol=COMPARE_CL_ABSOLUTE_ERROR)
                                except (AssertionError, TypeError):
                                    self.cl_faulty_plot(elem + "_" + key, value[subkey][2:], reference_name, to_test[key][subkey][2:], candidate_name, rtol_cl)
                                    status_pass = False
else:
try:
np.testing.assert_allclose(
value,
to_test[key],
rtol=rtol_cl,
atol=COMPARE_CL_ABSOLUTE_ERROR)
except (AssertionError, TypeError) as e:
self.cl_faulty_plot(elem + "_" + key, value[2:], reference_name, to_test[key][2:], candidate_name, rtol_cl)
status_pass = False
# For cross-spectra, as there can be zero-crossing, we
# instead compare the difference.
else:
                        # Normalize both arrays by the largest value so the
                        # fixed-decimal comparison below acts as a relative test
                        norm = max(
                            np.abs(value).max(), np.abs(to_test[key]).max())
                        value /= norm
                        to_test[key] /= norm
try:
np.testing.assert_array_almost_equal(
value, to_test[key], decimal=3)
except AssertionError:
self.cl_faulty_plot(elem + "_" + key, value[2:], reference_name, to_test[key][2:], candidate_name, rtol_cl)
status_pass = False
if 'output' in list(self.scenario.keys()):
if self.scenario['output'].find('mPk') != -1:
# testing equality of Pk
k = np.logspace(-2, log10(self.scenario['P_k_max_1/Mpc']), 50)
reference_pk = np.array([reference.pk(elem, 0) for elem in k])
candidate_pk = np.array([candidate.pk(elem, 0) for elem in k])
try:
np.testing.assert_allclose(
reference_pk,
candidate_pk,
rtol=rtol_pk,
atol=COMPARE_PK_ABSOLUTE_ERROR)
except AssertionError:
self.pk_faulty_plot(k, reference_pk, reference_name, candidate_pk, candidate_name, rtol_pk)
status_pass = False
return status_pass
def store_ini_file(self, path):
parameters = dict(list(self.verbose.items()) + list(self.scenario.items()))
with open(path + '.ini', 'w') as param_file:
param_file.write('# ' + str(parameters) + '\n')
if len(parameters) == 0:
# CLASS complains if the .ini file does not do anything.
param_file.write('write warnings = yes\n')
for key, value in list(parameters.items()):
param_file.write(key + " = " + str(value)+ '\n')
def cl_faulty_plot(self, cl_type, reference, reference_name, candidate, candidate_name, rtol):
path = os.path.join(self.faulty_figs_path, self.name)
fig, axes = plt.subplots(2, 1, sharex=True)
ell = np.arange(max(np.shape(candidate))) + 2
factor = ell*(ell + 1)/(2*np.pi) if cl_type[-2:] != 'pp' else ell**5
axes[0].plot(ell, factor*reference, label=reference_name)
axes[0].plot(ell, factor*candidate, label=candidate_name)
axes[1].semilogy(ell, 100*abs(candidate/reference - 1), label=cl_type)
axes[1].axhline(y=100*rtol, color='k', ls='--')
axes[-1].set_xlabel(r'$\ell$')
if cl_type[-2:] == 'pp':
axes[0].set_ylabel(r'$\ell^5 C_\ell^\mathrm{{{_cl_type}}}$'.format(_cl_type=cl_type[-2:].upper()))
else:
axes[0].set_ylabel(r'$\ell(\ell + 1)/(2\pi)C_\ell^\mathrm{{{_cl_type}}}$'.format(_cl_type=cl_type[-2:].upper()))
axes[1].set_ylabel('Relative error [%]')
for ax in axes:
ax.legend(loc='upper right')
fig.tight_layout()
fname = '{}_{}_{}_vs_{}.pdf'.format(path, cl_type, reference_name, candidate_name)
fig.savefig(fname, bbox_inches='tight')
plt.close(fig)
# Store parameters (contained in self.scenario) to text file
self.store_ini_file(path)
def pk_faulty_plot(self, k, reference, reference_name, candidate, candidate_name, rtol):
path = os.path.join(self.faulty_figs_path, self.name)
fig, axes = plt.subplots(2, 1, sharex=True)
axes[0].loglog(k, k**1.5*reference, label=reference_name)
axes[0].loglog(k, k**1.5*candidate, label=candidate_name)
axes[0].legend(loc='upper right')
axes[1].loglog(k, 100*np.abs(candidate/reference - 1))
axes[1].axhline(y=100*rtol, color='k', ls='--')
axes[-1].set_xlabel(r'$k\quad [\mathrm{Mpc}^{-1}]$')
axes[0].set_ylabel(r'$k^\frac{3}{2}P(k)$')
axes[1].set_ylabel(r'Relative error [%]')
fig.tight_layout()
fname = path + '_pk_{}_vs_{}.pdf'.format(reference_name, candidate_name)
fig.savefig(fname, bbox_inches='tight')
plt.close(fig)
# Store parameters (contained in self.scenario) to text file
self.store_ini_file(path)
def has_tensor(input_dict):
if 'modes' in list(input_dict.keys()):
if input_dict['modes'].find('t') != -1:
return True
else:
return False
return False
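# For example: has_tensor({'modes': 's, t'}) is True, has_tensor({}) is False.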
if __name__ == '__main__':
toto = TestClass()
unittest.main()
| 24,909 | 39.702614 | 159 | py |
class_DMDR | class_DMDR-master/python/setup.py | from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as nm
import os
import subprocess as sbp
import os.path as osp
# Recover the gcc compiler
GCCPATH_STRING = sbp.Popen(
['gcc', '-print-libgcc-file-name'],
stdout=sbp.PIPE).communicate()[0]
GCCPATH = osp.normpath(osp.dirname(GCCPATH_STRING)).decode()
# DMDR modification: added GSL/BLAS
liblist = ["class", "gsl", "gslcblas"]
MVEC_STRING = sbp.Popen(
['gcc', '-lmvec'],
stderr=sbp.PIPE).communicate()[1]
if b"mvec" not in MVEC_STRING:
liblist += ["mvec","m"]
# define absolute paths
root_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
include_folder = os.path.join(root_folder, "include")
classy_folder = os.path.join(root_folder, "python")
heat_folder = os.path.join(os.path.join(root_folder, "external"),"heating")
recfast_folder = os.path.join(os.path.join(root_folder, "external"),"RecfastCLASS")
hyrec_folder = os.path.join(os.path.join(root_folder, "external"),"HyRec2020")
# Recover the CLASS version
with open(os.path.join(include_folder, 'common.h'), 'r') as v_file:
for line in v_file:
if line.find("_VERSION_") != -1:
# get rid of the " and the v
VERSION = line.split()[-1][2:-1]
break
# Define cython extension and fix Python version
# DMDR modification: add path to GSL/BLAS below in library_dirs
# GSL stuff is also located in /cm/shared/sw/pkg-old/base/gsl/
classy_ext = Extension("classy", [os.path.join(classy_folder, "classy.pyx")],
include_dirs=[nm.get_include(), include_folder, heat_folder, recfast_folder, hyrec_folder],
libraries=liblist,
library_dirs=[root_folder, GCCPATH,'/mnt/xfs1/flatiron-sw/pkg/base/gsl/2.3/lib64'],
extra_link_args=['-lgomp']
)
import sys
classy_ext.cython_directives = {'language_level': "3" if sys.version_info.major>=3 else "2"}
setup(
name='classy',
version=VERSION,
description='Python interface to the Cosmological Boltzmann code CLASS',
url='http://www.class-code.net',
cmdclass={'build_ext': build_ext},
ext_modules=[classy_ext],
#data_files=[('bbn', ['../bbn/sBBN.dat'])]
)
| 2,316 | 35.777778 | 118 | py |
class_DMDR | class_DMDR-master/python/extract_errors.py | # From the dumped stdout and stderr of a nosetests test_class.py, extract all
# the failed steps.
# Usage: python extract_errors.py output
from __future__ import print_function
import sys
import os
def main(path):
"""
Create a shorter file containing only the errors from nosetests
"""
    assert os.path.isfile(path)
trimmed_path = path + '_errors'
destination = open(trimmed_path, 'w')
contains_error = False
with open(path, 'r') as source:
text = source.readlines()
start = 0
for index, line in enumerate(text):
if line.find('------------------') != -1:
if text[index+2].find('----------------') != -1:
stop = index-1
# Check that an error is contained
if stop > 0:
for i in range(start, stop+1):
if text[i].startswith('E'):
contains_error = True
if contains_error:
print('Found an error')
for i in range(start, stop+1):
print(text[i], end=' ')
destination.write(text[i])
start = index
contains_error = False
elif text[index+2].find('=================') != -1:
break
else:
pass
destination.close()
if __name__ == "__main__":
print(sys.argv)
if len(sys.argv) != 2:
print('Please specify the output file to analyse')
exit()
else:
main(sys.argv[-1])
| 1,664 | 31.019231 | 77 | py |
class_DMDR | class_DMDR-master/python/interface_generator.py | """
Automatically reads header files to generate an interface
"""
from __future__ import division, print_function
import sys
import logging
try:
from collections import OrderedDict as od
except ImportError:
try:
from ordereddict import OrderedDict as od
except ImportError:
raise ImportError(
"If you are running with Python v2.5 or 2.6"
" you need to manually install the ordereddict"
" package.")
try:
import colorlog
except ImportError:
raise ImportError(
"You have to install the colorlog module"
" with pip, or easy-install.")
SPACING = ' '
NAMING_CONVENTION = {
'precision': {'python': 'precision',
'function': 'precision'},
'background': {'python': 'background',
'function': 'background'},
'thermo': {'python': 'thermodynamics',
'function': 'thermodynamics'},
'perturbs': {'python': 'perturbations',
'function': 'perturb'},
'transfers': {'python': 'transfer',
'function': 'transfer'},
'primordial': {'python': 'primordial',
'function': 'primordial'},
'spectra': {'python': 'spectra',
'function': 'spectra'},
'lensing': {'python': 'lensing',
'function': 'lensing'},
'nonlinear': {'python': 'nonlinear',
'function': 'nonlinear'},
'output': {'python': 'output',
'function': 'output'},
}
def main():
# create logger
logger = create_logger()
# Recover all sub-header files
main_header = '../include/class.h'
headers = []
with open(main_header, 'r') as header_file:
in_modules = False
for line in header_file:
if in_modules:
if line.strip() == '':
in_modules = False
continue
if line.find('common') == -1 and line.find('input') == -1:
headers.append(
'../include/%s' % line.split()[-1].strip('"'))
if line.find('class modules') != -1:
in_modules = True
logger.info('Extracted the following headers: %s', ', '.join(headers))
output = 'classy.pyx'
logger.info('Creating %s', output)
structs = od()
output_file = open(output, 'w')
write_imports(output_file)
output_file.write('cdef extern from "class.h":\n')
# First write the first non automatic bits
output_file.write(
SPACING+'ctypedef char FileArg[40]\n' +
SPACING+'ctypedef char* ErrorMsg\n' +
SPACING+'cdef struct precision:\n' +
2*SPACING+'ErrorMsg error_message\n\n' +
SPACING+'cdef int _FAILURE_\n' +
SPACING+'cdef int _FALSE_\n' +
SPACING+'cdef int _TRUE_\n')
for header in headers:
extract_headers(header, structs, output_file, logger)
logger.info("Finished extracting headers")
for struct_name, struct in structs.items():
create_wrapper_class(struct_name, struct, output_file, logger)
return
def extract_headers(header, structs, output_file, logger):
"""toto"""
# Initialise the two flags controlling the exploration of the main
# structure
in_struct, main_struct_finished = False, False
# Flags for exploring enums (only the ones before the struct)
in_enum = False
# flag dealing with extracting docstrings
comment_partially_recovered = False
# Flag keeping track of multiple variables
multiple_var = False
# Flag recovering the functions
in_function_definitions, in_function, in_init = False, False, False
with open(header, 'r') as header_file:
logger.info("reading %s" % header)
for line in header_file:
# First case, recover the enums
if not main_struct_finished and not in_struct:
if line.find("enum ") != -1 and line.find("{") != -1:
enum_members = []
if line.find(";") == -1:
in_enum = True
enum_name = line.strip("enum").strip().strip('{')
else:
in_enum = False
line = line.strip("enum").strip().strip(';')
enum_name, enum_sign = line.split(' ', 1)
enum_sign = enum_sign.strip('}').strip('{')
for elem in enum_sign.split(','):
enum_members.append(elem.strip())
output_file.write(
SPACING + 'cdef enum %s:\n' % enum_name)
for elem in enum_members:
output_file.write(2*SPACING + elem + '\n')
output_file.write('\n')
elif in_enum:
if line.find('};') != -1:
in_enum = False
output_file.write(
SPACING + 'cdef enum %s:\n' % enum_name)
for elem in enum_members:
output_file.write(2*SPACING + elem + '\n')
output_file.write('\n')
else:
if line.strip() != '':
enum_members.append(line.split()[0].strip().strip(','))
if line.find("struct ") != -1 and not main_struct_finished:
in_struct = True
# Recover the name
logger.debug("in struct: %s" % line)
struct_name = line.strip().split()[1]
logger.debug("struct name: %s" % struct_name)
structs[struct_name] = {}
structs[struct_name].update(
NAMING_CONVENTION[struct_name])
output_file.write("%scdef struct %s:\n" % (
SPACING, struct_name))
continue
elif in_struct:
if line.find("};\n") != -1:
output_file.write('\n')
in_struct, main_struct_finished = False, True
else:
# if the line is not empty or does not contain only a
# comment:
if line.strip() == '' or line.strip()[:2] == '/*':
continue
logger.debug(
"potentially non empty line: %s" % line.strip())
#elif line.find('/**') != -1 or line.find('*/') != -1:
#continue
if line.find(';') == -1 and not comment_partially_recovered:
logger.debug("--> Discarded")
continue
elif line.find(';') != -1 and not comment_partially_recovered:
var_doc = ''
var_part, begin_comment = line.strip().split(';', 1)
var_doc += begin_comment.strip()[4:].strip()
# 2 things can happen: there can be arrays, and there
# can be several variables defined in one line...
# If array, slightly more complex
if var_part.find('*') != -1:
# if no comma is found, it means it is a single
# variable: good !
if var_part.find(',') == -1:
# remove if commented (starts with /*)
if var_part[:2] in ['/*', '//']:
continue
multiple_var = False
var_type, var_stars, var_name = var_part.strip().split()
structs[struct_name][var_name] = [
var_type, var_stars]
else:
# Count how many variables are defined
multiple_var = True
all_vars = [elem.strip() for elem in
var_part.split('*')[-1].split(',')]
var_type, var_stars = (var_part.strip().
split()[:2])
for var in all_vars:
structs[struct_name][var] = [
var_type, var_stars]
else:
# Again check for more than one variable
var_stars = ''
if var_part.find(',') == -1:
multiple_var = False
var_type, var_name = var_part.strip().split(' ', 1)
# Check if enum
if var_type == 'enum':
enum_name, var_name = var_name.split()
var_type += ' '+enum_name
structs[struct_name][var_name] = [
var_type, var_stars]
else:
multiple_var = True
all_vars = [elem.strip() for elem in
                                    ' '.join(var_part.split()[1:]).split(',')]
var_type = (var_part.strip().split()[0])
for var in all_vars:
structs[struct_name][var] = [
var_type, var_stars]
# If the comment is finished, pass
if var_doc[-2:] != '*/':
comment_partially_recovered = True
else:
var_doc = var_doc[:-2].replace('\\f$', '$').strip()
structs[struct_name][var_name].append(var_doc)
logger.debug(
"extracted the variable %s, " % var_name +
"of type %s, with docstring: %s" % (
''.join([var_stars, var_type]), var_doc))
if not multiple_var:
output_file.write(2*SPACING+' '.join(
[elem for elem in [var_type, var_stars, var_name]
if elem])+'\n')
else:
for var in all_vars:
output_file.write(2*SPACING+' '.join(
[elem for elem in [var_type, var_stars, var]
if elem])+'\n')
if comment_partially_recovered:
logger.debug("--> Accepted")
var_doc += ' '+line.strip()
if var_doc[-2:] == '*/':
comment_partially_recovered = False
var_doc = var_doc[:-2].replace('\\f$', '$').strip()
structs[struct_name][var_name].append(var_doc)
logger.debug(
"extracted the variable %s, " % var_name +
"of type %s, with docstring: %s" % (
''.join([var_stars, var_type]), var_doc))
elif main_struct_finished:
if line.find('extern "C"') != -1:
in_function_definitions = True
if not in_function_definitions:
continue
else:
if line.find('(') != -1:
in_function = True
logger.debug("Found a function")
func_type, func_name = line.split('(')[0].strip().split()
logger.debug('%s %s' % (func_name, func_type))
func_param = []
if func_name == structs[struct_name]['function']+'_init':
logger.info("found the init function")
in_init = True
structs[struct_name]['init'] = [func_name]
output_file.write(SPACING+'%s %s(' % (
func_type, func_name))
elif in_function:
# recover the signature of the function
line = line.strip().strip(',')
if line.find('struct') != -1:
if in_init:
name = line.split('*')[0].strip()[7:]
structs[struct_name]['init'].append(name)
func_param.append('void *')
elif line.find('*') != -1:
# Taking into account with or without spaces
temp = ''.join(line.strip(',').split())
last_star = len(temp)-temp[::-1].find('*')
func_param.append(temp[:last_star])
elif line.find(')') == -1:
if line != '':
func_param.append(line.split()[0])
else:
logger.debug('signature extracted')
in_function = False
if in_init:
in_init = False
output_file.write(', '.join(func_param) + ')\n')
elif line.find('}') != -1:
output_file.write('\n')
in_function_definitions = False
#print line.strip()
def create_wrapper_class(struct_name, struct, of, logger):
"""TODO"""
of.write('# Defining wrapper around struct %s\n' % struct_name)
of.write('cdef class %s:\n' % (
NAMING_CONVENTION[struct_name]['python'].capitalize()))
## recover the number of additional arguments:
init_name, argument_names = struct['init'][0], struct['init'][1:]
for companion in argument_names:
of.write(SPACING+'cdef %s _%s\n' % (companion, companion))
#logger.info("structure: %s, python name: %s" % (
#companion, NAMING_CONVENTION[companion]['python']))
of.write('\n')
# Define the array variables for all needed
array_variables = []
variables = []
for key, value in struct.items():
if key != 'init':
if value[1]:
array_variables.append(key)
variables.append(key)
of.write(SPACING+'cdef np.ndarray %s_arr\n' % key)
else:
variables.append(key)
of.write('\n')
# write the init
of.write(SPACING+'def __init__(self')
for companion in argument_names:
of.write(", %s py_%s" % (
NAMING_CONVENTION[companion]['python'].capitalize(), companion))
of.write('):\n\n')
# pointing the pointers where they belong
for companion in argument_names:
of.write(2*SPACING+"self._%s = py_%s._%s\n" % (
companion, companion, companion))
# Writing the call to structname_init()
of.write(2*SPACING+'%s_init(\n' % struct_name)
for companion in argument_names:
of.write(3*SPACING+'&(self._%s),\n' % companion)
of.write(3*SPACING+'&(self._%s))\n\n' % struct_name)
#of.write(2*SPACING+'%s_init(&(self._%s))\n\n' % (
#struct_name, struct_name))
for array in array_variables:
of.write(2*SPACING+'# Wrapping %s\n' % array)
of.write(2*SPACING+'%s_wrapper = ArrayWrapper()\n' % array)
of.write(
2*SPACING+"%s_wrapper.set_data(%d, '%s', "
"<void*> self._%s.%s)\n" % (
array, 2, struct[array].strip('*'), struct_name, array))
of.write(
2*SPACING+'self.%s_arr = np.array(%s_wrapper, '
'copy=False)\n' % (
array, array))
of.write(2*SPACING+'self.%s_arr.base = '
'<PyObject*> %s_wrapper\n' % (
array, array))
of.write(2*SPACING+'Py_INCREF(%s_wrapper)\n\n' % array)
#raise NotImplementedError('multiple init are not supported')
# Write the properties
for key in variables:
of.write(SPACING+'property %s:\n' % key)
if key not in array_variables:
of.write(2*SPACING+'def __get__(self):\n')
of.write(3*SPACING+'return self._%s.%s\n' % (struct_name, key))
of.write(2*SPACING+'def __set__(self, rhs):\n')
of.write(3*SPACING+'self._%s.%s = rhs\n' % (struct_name, key))
else:
of.write(2*SPACING+'def __get__(self):\n')
of.write(3*SPACING+'return self.%s_arr\n' % key)
of.write(2*SPACING+'def __set__(self, rhs):\n')
of.write(3*SPACING+'self.%s_arr[:] = rhs\n' % key)
of.write('\n')
# Add blank lines
of.write('\n\n')
def write_imports(output_file):
"""TODO"""
a = '''# Author: Gael Varoquaux
# License: BSD
from libc.stdlib cimport free
from cpython cimport PyObject, Py_INCREF
# Import the Python-level symbols of numpy
import numpy as np
# Import the C-level symbols of numpy
cimport numpy as np
# Numpy must be initialized. When using numpy from C or Cython you must
# _always_ do that, or you will have segfaults
np.import_array()
cdef class ArrayWrapper:
cdef void* data_ptr
cdef int size
cdef int type
cdef set_data(self, int size, char* type, void* data_ptr):
""" Set the data of the array
This cannot be done in the constructor as it must recieve C-level
arguments.
Parameters:
-----------
size: int
Length of the array.
data_ptr: void*
Pointer to the data
"""
self.data_ptr = data_ptr
self.size = size
if type.find('int') != -1:
self.type = np.NPY_INT
elif type.find('float') != -1:
self.type = np.NPY_FLOAT
elif type.find('double') != -1:
self.type = np.NPY_DOUBLE
elif type.find('long') != -1:
self.type = np.NPY_LONG
def __array__(self):
""" Here we use the __array__ method, that is called when numpy
tries to get an array from the object."""
cdef np.npy_intp shape[1]
shape[0] = <np.npy_intp> self.size
# Create a 1D array, of length 'size'
ndarray = np.PyArray_SimpleNewFromData(1, shape,
self.type, self.data_ptr)
return ndarray
def __dealloc__(self):
""" Frees the array. This is called by Python when all the
references to the object are gone. """
free(<void*>self.data_ptr)\n\n'''
output_file.write(a)
def create_logger():
"""Nothing"""
logger = logging.getLogger('simple_example')
#logger.setLevel(logging.DEBUG)
logger.setLevel(logging.INFO)
# create console handler and set level to debug
console_handler = logging.StreamHandler()
#console_handler.setLevel(logging.DEBUG)
console_handler.setLevel(logging.INFO)
# create formatter
#formatter = logging.Formatter(
#"%(asctime)s %(module)s: L%(lineno) 4s %(funcName) 15s"
#" | %(levelname) -10s --> %(message)s")
formatter = colorlog.ColoredFormatter(
"%(asctime)s %(module)s: L%(lineno) 4s %(blue)s%(funcName) 15s%(reset)s"
" | %(log_color)s%(levelname) -10s --> %(message)s%(reset)s",
datefmt=None,
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
})
# add formatter to console_handler
console_handler.setFormatter(formatter)
# add console_handler to logger
logger.addHandler(console_handler)
return logger
if __name__ == "__main__":
sys.exit(main())
| 20,191 | 40.462012 | 88 | py |
class_DMDR | class_DMDR-master/scripts/thermo.py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# import necessary modules
# uncomment to get plots displayed in notebook
#get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from classy import Class
from scipy.optimize import fsolve
from scipy.interpolate import interp1d
import math
# In[ ]:
# esthetic definitions for the plots
font = {'size' : 16, 'family':'STIXGeneral'}
axislabelfontsize='large'
matplotlib.rc('font', **font)
matplotlib.mathtext.rcParams['legend.fontsize']='medium'
plt.rcParams["figure.figsize"] = [8.0,6.0]
# In[ ]:
common_settings = {'output' : 'tCl',
# LambdaCDM parameters
'h':0.6781,
'omega_b':0.02238280,
'omega_cdm':0.1201075,
'A_s':2.100549e-09,
'n_s':0.9660499,
'tau_reio':0.05430842,
'thermodynamics_verbose':1
}
##############
#
# call CLASS
#
###############
M = Class()
M.set(common_settings)
M.compute()
derived = M.get_current_derived_parameters(['tau_rec','conformal_age'])
thermo = M.get_thermodynamics()
print (thermo.keys())
# In[ ]:
tau = thermo['conf. time [Mpc]']
g = thermo['g [Mpc^-1]']
# to make the reionisation peak visible, rescale g by 100 for late times
g[:500] *= 100
#################
#
# start plotting
#
#################
#
plt.xlim([1.e2,derived['conformal_age']])
plt.xlabel(r'$\tau \,\,\, \mathrm{[Mpc]}$')
plt.ylabel(r'$\mathrm{visibility} \,\,\, g \,\,\, [\mathrm{Mpc}^{-1}]$')
plt.axvline(x=derived['tau_rec'],color='k')
# The conformal time at reionisation could be extracted from the code.
# But we know it because it is part of the standard output
# when thermodynamics_verbose=1
plt.axvline(x=4255.316282,color='k')
#
# Print functions one by one, saving between each (for slides)
#
plt.semilogx(tau,g,'r',label=r'$g$')
# In[ ]:
plt.savefig('thermo.pdf',bbox_inches='tight')
| 2,008 | 22.091954 | 72 | py |
class_DMDR | class_DMDR-master/scripts/cltt_terms.py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# import necessary modules
from classy import Class
from math import pi
# In[ ]:
#############################################
#
# Cosmological parameters and other CLASS parameters
#
common_settings = {# LambdaCDM parameters
'h':0.67810,
'omega_b':0.02238280,
'omega_cdm':0.1201075,
'A_s':2.100549e-09,
'n_s':0.9660499,
'tau_reio':0.05430842 ,
# output and precision parameters
'output':'tCl,pCl,lCl',
'lensing':'yes',
'l_max_scalars':5000}
#
M = Class()
#
###############
#
# call CLASS for the total Cl's and then for each contribution
#
###############
#
M.set(common_settings)
M.compute()
cl_tot = M.raw_cl(3000)
cl_lensed = M.lensed_cl(3000)
M.empty() # reset input
#
M.set(common_settings) # new input
M.set({'temperature contributions':'tsw'})
M.compute()
cl_tsw = M.raw_cl(3000)
M.empty()
#
M.set(common_settings)
M.set({'temperature contributions':'eisw'})
M.compute()
cl_eisw = M.raw_cl(3000)
M.empty()
#
M.set(common_settings)
M.set({'temperature contributions':'lisw'})
M.compute()
cl_lisw = M.raw_cl(3000)
M.empty()
#
M.set(common_settings)
M.set({'temperature contributions':'dop'})
M.compute()
cl_dop = M.raw_cl(3000)
M.empty()
# In[ ]:
# modules and settings for the plot
#
# uncomment to get plots displayed in notebook
#get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib
import matplotlib.pyplot as plt
# esthetic definitions for the plots
font = {'size' : 16, 'family':'STIXGeneral'}
axislabelfontsize='large'
matplotlib.rc('font', **font)
matplotlib.mathtext.rcParams['legend.fontsize']='medium'
plt.rcParams["figure.figsize"] = [8.0,6.0]
# In[ ]:
#################
#
# start plotting
#
#################
#
plt.xlim([2,3000])
plt.xlabel(r"$\ell$")
plt.ylabel(r"$\ell (\ell+1) C_l^{TT} / 2 \pi \,\,\, [\times 10^{10}]$")
plt.grid()
#
ell = cl_tot['ell']
factor = 1.e10*ell*(ell+1.)/2./pi
plt.semilogx(ell,factor*cl_tsw['tt'],'c-',label=r'$\mathrm{T+SW}$')
plt.semilogx(ell,factor*cl_eisw['tt'],'r-',label=r'$\mathrm{early-ISW}$')
plt.semilogx(ell,factor*cl_lisw['tt'],'y-',label=r'$\mathrm{late-ISW}$')
plt.semilogx(ell,factor*cl_dop['tt'],'g-',label=r'$\mathrm{Doppler}$')
plt.semilogx(ell,factor*cl_tot['tt'],'b-',label=r'$\mathrm{total}$')
plt.semilogx(ell,factor*cl_lensed['tt'],'k-',label=r'$\mathrm{lensed}$')
#
plt.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
# In[ ]:
plt.savefig('cltt_terms.pdf',bbox_inches='tight')
| 2,635 | 21.529915 | 73 | py |
class_DMDR | class_DMDR-master/scripts/varying_neff.py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# import necessary modules
# uncomment to get plots displayed in notebook
#get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from classy import Class
from scipy.optimize import fsolve
import math
# In[ ]:
############################################
#
# Varying parameter (others fixed to default)
#
var_name = 'N_ur'
var_array = np.linspace(3.044,5.044,5)
var_num = len(var_array)
var_legend = r'$N_\mathrm{eff}$'
var_figname = 'neff'
#
# Constraints to be matched
#
# As explained in the "Neutrino cosmology" book, CUP, Lesgourgues et al., section 5.3, the goal is to vary
# - omega_cdm by a factor alpha = (1 + coeff*Neff)/(1 + coeff*3.044)
# - h by a factor sqrt(alpha)
# in order to keep a fixed z_equality(R/M) and z_equality(M/Lambda)
#
omega_b = 0.0223828
omega_cdm_standard = 0.1201075
h_standard = 0.67810
#
# coefficient such that omega_r = omega_gamma (1 + coeff*Neff),
# i.e. such that omega_ur = omega_gamma * coeff * Neff:
# coeff = omega_ur/omega_gamma/Neff_standard
# We could extract omega_ur and omega_gamma on-the-fly within th script,
# but for simplicity we did a preliminary interactive run with background_verbose=2
# and we copied the values given in the budget output.
#
coeff = 1.70961e-05/2.47298e-05/3.044
print ("coeff=",coeff)
#
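# Worked example of this rescaling (approximate numbers): coeff ~ 0.227, so
# for Delta N_eff = 2 (N_ur = 5.044),
#   alpha = (1 + coeff*5.044)/(1 + coeff*3.044) ~ 1.269,
# i.e. (omega_b + omega_cdm) is scaled by ~1.269 and h by sqrt(alpha) ~ 1.126,
# which keeps both redshifts of equality fixed.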
#############################################
#
# Fixed settings
#
common_settings = {# fixed LambdaCDM parameters
'omega_b':omega_b,
'A_s':2.100549e-09,
'n_s':0.9660499,
'tau_reio':0.05430842,
# output and precision parameters
'output':'tCl,pCl,lCl,mPk',
'lensing':'yes',
'P_k_max_1/Mpc':3.0,
'l_switch_limber':9}
#
##############################################
#
# loop over varying parameter values
#
M = {}
#
for i, N_ur in enumerate(var_array):
#
# rescale omega_cdm and h
#
alpha = (1.+coeff*N_ur)/(1.+coeff*3.044)
omega_cdm = (omega_b + omega_cdm_standard)*alpha - omega_b
h = h_standard*math.sqrt(alpha)
print (' * Compute with %s=%e, %s=%e, %s=%e'%('N_ur',N_ur,'omega_cdm',omega_cdm,'h',h))
#
# call CLASS
#
M[i] = Class()
M[i].set(common_settings)
M[i].set({'N_ur':N_ur})
M[i].set({'omega_cdm':omega_cdm})
M[i].set({'h':h})
M[i].compute()
# In[ ]:
# esthetic definitions for the plots
font = {'size' : 24, 'family':'STIXGeneral'}
axislabelfontsize='large'
matplotlib.rc('font', **font)
matplotlib.mathtext.rcParams['legend.fontsize']='medium'
plt.rcParams["figure.figsize"] = [8.0,6.0]
# In[ ]:
#############################################
#
# extract spectra and plot them
#
#############################################
kvec = np.logspace(-4,np.log10(3),1000) # array of kvec in h/Mpc
twopi = 2.*math.pi
#
# Create figures
#
fig_Pk, ax_Pk = plt.subplots()
fig_TT, ax_TT = plt.subplots()
#
# loop over varying parameter values
#
ll = {}
clM = {}
clTT = {}
pkM = {}
legarray = []
for i, N_ur in enumerate(var_array):
#
alpha = (1.+coeff*N_ur)/(1.+coeff*3.044)
h = 0.67810*math.sqrt(alpha) # this is h
#
# deal with colors and legends
#
if i == 0:
var_color = 'k'
var_alpha = 1.
else:
var_color = plt.cm.Reds(0.8*i/(var_num-1))
#
# get Cls
#
clM[i] = M[i].lensed_cl(2500)
ll[i] = clM[i]['ell'][2:]
clTT[i] = clM[i]['tt'][2:]
#
# store P(k) for common k values
#
pkM[i] = []
# The function .pk(k,z) wants k in 1/Mpc so we must convert kvec for each case with the right h
khvec = kvec*h # This is k in 1/Mpc
for kh in khvec:
pkM[i].append(M[i].pk(kh,0.)*h**3)
#
# plot P(k)
#
if i == 0:
ax_Pk.semilogx(kvec,np.array(pkM[i])/np.array(pkM[0]),
color=var_color,#alpha=var_alpha,
linestyle='-')
else:
ax_Pk.semilogx(kvec,np.array(pkM[i])/np.array(pkM[0]),
color=var_color,#alpha=var_alpha,
linestyle='-',
label=r'$\Delta N_\mathrm{eff}=%g$'%(N_ur-3.044))
#
# plot C_l^TT
#
if i == 0:
ax_TT.semilogx(ll[i],clTT[i]/clTT[0],
color=var_color,alpha=var_alpha,linestyle='-')
else:
ax_TT.semilogx(ll[i],clTT[i]/clTT[0],
color=var_color,alpha=var_alpha,linestyle='-',
label=r'$\Delta N_\mathrm{eff}=%g$'%(N_ur-3.044))
#
# output of P(k) figure
#
ax_Pk.set_xlim([1.e-3,3.])
ax_Pk.set_ylim([0.98,1.20])
ax_Pk.set_xlabel(r'$k \,\,\,\, [h/\mathrm{Mpc}]$')
ax_Pk.set_ylabel(r'$P(k)/P(k)[N_\mathrm{eff}=3.044]$')
ax_Pk.legend(loc='upper left')
fig_Pk.tight_layout()
fig_Pk.savefig('ratio-%s-Pk.pdf' % var_figname)
#
# output of C_l^TT figure
#
ax_TT.set_xlim([2,2500])
ax_TT.set_ylim([0.850,1.005])
ax_TT.set_xlabel(r'$\mathrm{Multipole} \,\,\,\, \ell$')
ax_TT.set_ylabel(r'$C_\ell^\mathrm{TT}/C_\ell^\mathrm{TT}(N_\mathrm{eff}=3.044)$')
ax_TT.legend(loc='lower left')
fig_TT.tight_layout()
fig_TT.savefig('ratio-%s-cltt.pdf' % var_figname)
| 5,240 | 25.876923 | 106 | py |
class_DMDR | class_DMDR-master/scripts/Growth_with_w.py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
#get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from classy import Class
from scipy import interpolate
# In[ ]:
w0vec = [-0.7, -1.0, -1.3]
wavec = [-0.2,0.0,0.2]
#w0vec = [-1.0]
#wavec = [0.0]
cosmo = {}
for w0 in w0vec:
for wa in wavec:
if w0==-1.0 and wa==0.0:
M='LCDM'
else:
M = '('+str(w0)+','+str(wa)+')'
cosmo[M] = Class()
cosmo[M].set({'input_verbose':1,'background_verbose':1,'gauge' : 'Newtonian'})
if M!='LCDM':
cosmo[M].set({'Omega_Lambda':0.,'w0_fld':w0,'wa_fld':wa})
cosmo[M].compute()
# In[ ]:
import scipy
import scipy.special
import scipy.integrate
def D_hypergeom(avec,csm):
bg = csm.get_background()
Om = csm.Omega0_m()
if '(.)rho_lambda' in bg:
Ol = bg['(.)rho_lambda'][-1]/bg['(.)rho_crit'][-1]
else:
Ol = bg['(.)rho_fld'][-1]/bg['(.)rho_crit'][-1]
x = Ol/Om*avec**3
D = avec*scipy.special.hyp2f1(1./3.,1,11./6.,-x)
D_today = scipy.special.hyp2f1(1./3.,1,11./6.,-Ol/Om)
return D/D_today
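# note: the closed form used above, D(a) = a * 2F1(1/3, 1, 11/6, -(Omega_L/Omega_m) a^3),
# is the standard analytic growth factor for a flat matter+Lambda universe;
# by construction D_hypergeom(np.array([1.]), csm) returns array([1.])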
def f_hypergeom(avec,csm):
bg = csm.get_background()
Om = csm.Omega0_m()
if '(.)rho_lambda' in bg:
Ol = bg['(.)rho_lambda'][-1]/bg['(.)rho_crit'][-1]
else:
Ol = bg['(.)rho_fld'][-1]/bg['(.)rho_crit'][-1]
x = Ol/Om*avec**3
D = avec*scipy.special.hyp2f1(1./3.,1,11./6.,-x)
f = 1.-6./11.*x*avec/D*scipy.special.hyp2f1(4./3.,2,17./6.,-x)
return f
def D_integral2(avec,csm):
bg = csm.get_background()
Om = csm.Omega0_m()
if '(.)rho_lambda' in bg:
Ol = bg['(.)rho_lambda'][-1]/bg['(.)rho_crit'][-1]
w0 = -1
wa = 0.0
else:
Ol = bg['(.)rho_fld'][-1]/bg['(.)rho_crit'][-1]
w0 = csm.pars['w0_fld']
wa = csm.pars['wa_fld']
D = np.zeros(avec.shape)
for idx, a in enumerate(avec):
Hc = a*np.sqrt(Om/a**3 + Ol*a**(-3*(1+w0+wa))*np.exp(-3.*(1.0-a)*wa) )
Dintegrand2 = lambda a: (a*np.sqrt(Om/a**3 + Ol*a**(-3*(1+w0+wa))*np.exp(-3.*(1.0-a)*wa) ))**(-3)
I = scipy.integrate.quad(Dintegrand2, 1e-15,a)
D[idx] = Hc/a*I[0]
D = D/scipy.integrate.quad(Dintegrand2,1e-15,1)[0]
return D
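# D_integral2 above implements the standard integral form of the growth factor,
# D(a) ~ H(a) * int_0^a da' / (a' H(a'))^3, valid when growth is sourced by matter
# only, with the final division normalizing to D(a=1) ~ 1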
def D_integral(avec,csm):
bg = csm.get_background()
Om = csm.Omega0_m()
Ol = bg['(.)rho_lambda'][-1]/bg['(.)rho_crit'][-1]
Or = 1-Om-Ol
def Dintegrand(a):
Hc = np.sqrt(Om/a+Ol*a*a+Or/a/a)
#print a,Hc
return Hc**(-3)
D = np.zeros(avec.shape)
for idx, a in enumerate(avec):
#if a<1e-4:
# continue
Hc = np.sqrt(Om/a+Ol*a*a+Or/a/a)
I = scipy.integrate.quad(Dintegrand,1e-15,a,args=())
D[idx] = Hc/a*I[0]
D = D/scipy.integrate.quad(Dintegrand,1e-15,1,args=())[0]
return D
def D_linder(avec,csm):
bg = csm.get_background()
if '(.)rho_lambda' in bg:
Ol = bg['(.)rho_lambda'][-1]/bg['(.)rho_crit'][-1]
w0 = -1
wa = 0.0
else:
Ol = bg['(.)rho_fld'][-1]/bg['(.)rho_crit'][-1]
w0 = csm.pars['w0_fld']
wa = csm.pars['wa_fld']
Om_of_a = (bg['(.)rho_cdm']+bg['(.)rho_b'])/bg['H [1/Mpc]']**2
    gamma = 0.55+0.05*(1.+w0+0.5*wa)
a_bg = 1./(1.+bg['z'])
integ = (Om_of_a**gamma-1.)/a_bg
integ_interp = interpolate.interp1d(a_bg,integ)
D = np.zeros(avec.shape)
    amin = 1e-3 # lower bound of the integration in scale factor
for idx, a in enumerate(avec):
if a<amin:
D[idx] = a
else:
I = scipy.integrate.quad(integ_interp,amin,a,args=())
D[idx] = a*np.exp(I[0])
# D = D/scipy.integrate.quad(Dintegrand,1e-15,1,args=())[0]
return D
def D_linder2(avec,csm):
bg = csm.get_background()
if '(.)rho_lambda' in bg:
Ol = bg['(.)rho_lambda'][-1]/bg['(.)rho_crit'][-1]
w0 = -1
wa = 0.0
rho_de = bg['(.)rho_lambda']
else:
Ol = bg['(.)rho_fld'][-1]/bg['(.)rho_crit'][-1]
w0 = csm.pars['w0_fld']
wa = csm.pars['wa_fld']
rho_de = bg['(.)rho_fld']
rho_M = bg['(.)rho_cdm']+bg['(.)rho_b']
#Om_of_a = rho_M/bg['H [1/Mpc]']**2
Om_of_a = rho_M/(rho_M+rho_de)
gamma = 0.55+0.05*(1+w0+0.5*wa)
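    # gamma-parametrization of growth (Linder 2005): f(a) ~ Omega_m(a)^gamma with
    # gamma ~ 0.55 + 0.05*[1 + w(z=1)] and w(z=1) = w0 + wa/2 in the CPL form;
    # D then follows from integrating dlnD/dlna = f, done with np.trapz below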
#a_bg = 1./(1.+bg['z'])
a_bg = avec
integ = (Om_of_a**gamma-1.)/a_bg
D = np.zeros(avec.shape)
for idx, a in enumerate(avec):
if idx<2:
I=0
else:
I = np.trapz(integ[:idx],x=avec[:idx])
D[idx] = a*np.exp(I)
# D = D/scipy.integrate.quad(Dintegrand,1e-15,1,args=())[0]
return D/D[-1]
def draw_vertical_redshift(csm, theaxis, var='tau',z=99,ls='-.',label='$z=99$'):
if var=='z':
xval = z
elif var=='a':
xval = 1./(z+1)
elif var=='tau':
bg = csm.get_background()
f = interpolate.interp1d(bg['z'],bg['conf. time [Mpc]'])
xval = f(z)
theaxis.axvline(xval,lw=1,ls=ls,color='k',label=label)
# In[ ]:
figwidth1 = 4.4 #=0.7*6.3
figwidth2 = 6.3
figwidth15 = 0.5*(figwidth1+figwidth2)
ratio = 8.3/11.7
figheight1 = figwidth1*ratio
figheight2 = figwidth2*ratio
figheight15 = figwidth15*ratio
lw=2
fs=12
labelfs=16
fig, (ax1, ax2) = plt.subplots(2,1,figsize=(1.2*figwidth1,figheight1/(3./5.)),sharex=True,
gridspec_kw = {'height_ratios':[3, 2]})
if False:
aminexp = -13
amin = 10**aminexp
ymin = 10**(aminexp/2.)
ymax = 10**(-aminexp/2.)
elif False:
aminexp = -7
amin = 10**aminexp
ymin = 10**(aminexp)
ymax = 10**(-aminexp)
else:
aminexp = -4
amin = 10**aminexp
ymin = 10**(aminexp-1)
ymax = 10**(-aminexp+1)
bg = cosmo['LCDM'].get_background()
a = 1./(bg['z']+1)
H = bg['H [1/Mpc]']
D = bg['gr.fac. D']
f = bg['gr.fac. f']
ax1.loglog(a,D,lw=lw,label=r'$D_+^\mathrm{approx}$')
ax1.loglog(a,D_hypergeom(a,cosmo['LCDM']),lw=lw,label=r'$D_+^\mathrm{analytic}$')
ax1.loglog(a,a*ymax,'k--',lw=lw,label=r'$\propto a$')
ax1.loglog(a,1./a*ymin,'k:',lw=lw,label=r'$\propto a^{-1}$')
ax2.semilogx(a,D/D_hypergeom(a,cosmo['LCDM']),lw=lw,label=r'$D_+/D_+^\mathrm{analytic}$')
#ax2.semilogx(a,grow/grow[-1]/D_integral(a,cosmo['CDM']),'--',lw=5)
ax2.semilogx(a,f/f_hypergeom(a,cosmo['LCDM']),lw=lw,label=r'$f/f^{\,\mathrm{analytic}}$')
draw_vertical_redshift(cosmo['LCDM'], ax1, var='a',z=99,label='$z=99$')
draw_vertical_redshift(cosmo['LCDM'], ax1, var='a',z=49,label='$z=49$',ls='-')
draw_vertical_redshift(cosmo['LCDM'], ax2, var='a',z=99,label=None)
draw_vertical_redshift(cosmo['LCDM'], ax2, var='a',z=49,label=None,ls='-')
lgd1 = ax1.legend(fontsize=fs,ncol=1,loc='upper left',
bbox_to_anchor=(1.02, 1.035))
#lgd2 = ax2.legend([r'$D_+/D_+^\mathrm{analytic}$','$z=99$'],
# fontsize=fs,ncol=1,loc='upper left',
# bbox_to_anchor=(1.0, 1.08))
lgd2 = ax2.legend(fontsize=fs,ncol=1,loc='upper left',
bbox_to_anchor=(1.02, 0.83))
ax1.set_xlim([10**aminexp,1])
ax2.set_xlabel(r'$a$',fontsize=fs)
ax1.set_ylim([ymin,ymax])
ax2.set_ylim([0.9,1.099])
ax2.axhline(1,color='k')
fig.tight_layout()
fig.subplots_adjust(hspace=0.0)
fig.savefig('NewtonianGrowthFactor.pdf',bbox_extra_artists=(lgd1,lgd2), bbox_inches='tight')
# In[ ]:
lw=2
fs=14
fig, (ax1, ax2) = plt.subplots(2,1,figsize=(6,8),sharex=True,)
# gridspec_kw = {'height_ratios':[2, 1]})
for M, csm in cosmo.items():
if M!='LCDM':
w0, wa = M.strip('()').split(',')
if float(wa)!=0.0:
continue
bg = csm.get_background()
a = 1./(bg['z']+1)
H = bg['H [1/Mpc]']
#grow = bg['grow']
#grow_prime = bg['grow_prime']
D = bg['gr.fac. D']
f = bg['gr.fac. f']
#grow_interp = interpolate.interp1d(a,grow)
#p = ax1.semilogx(a,grow/grow[-1]/a,lw=lw,label=M)
#colour = p[0].get_color()
p=ax1.semilogx(a,D_linder2(a,csm)/a,lw=lw,ls='--',label=M)
colour = p[0].get_color()
ax1.semilogx(a,D/a,lw=lw,ls='-',color=colour)
ax1.semilogx(a,D_hypergeom(a,csm)/a,lw=lw,ls=':',color=colour)
ax2.semilogx(a,D/D_integral2(a,csm),lw=lw,ls='-',color=colour)
ax2.semilogx(a,D/D_hypergeom(a,csm),lw=lw,ls=':',color=colour)
ax2.semilogx(a,D/D_linder2(a,csm),lw=lw,ls='--',color=colour)
ax1.set_xlim([1e-3,1])
ax2.set_xlabel(r'$a$',fontsize=fs)
ax1.set_ylim([0,2])
ax2.set_ylim([0.9,1.3])
lgd1 = ax1.legend(fontsize=fs,ncol=1,loc='lower left')
# bbox_to_anchor=(1.0, 1.035))
fig.tight_layout()
fig.subplots_adjust(hspace=0.0)
fig.savefig('Growthrate_w0.pdf')
| 8,550 | 26.944444 | 105 | py |
class_DMDR | class_DMDR-master/scripts/many_times.py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# import necessary modules
# uncomment to get plots displayed in notebook
#get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from classy import Class
from scipy.optimize import fsolve
from scipy.interpolate import interp1d
import math
# In[ ]:
# esthetic definitions for the plots
font = {'size' : 16, 'family':'STIXGeneral'}
axislabelfontsize='large'
matplotlib.rc('font', **font)
matplotlib.mathtext.rcParams['legend.fontsize']='medium'
plt.rcParams["figure.figsize"] = [8.0,6.0]
# In[ ]:
#############################################
#
# User settings controlling the figure aspect
#
z_max_pk = 46000 # highest redshift involved
k_per_decade = 400 # number of k values, controls final resolution
k_min_tau0 = 40. # this value controls the minimum k value in the figure (it is k_min * tau0)
P_k_max_inv_Mpc =1.0 # this value is directly the maximum k value in the figure in Mpc
tau_num_early = 2000 # number of conformal time values before recombination, controls final resolution
tau_num_late = 200 # number of conformal time values after recombination, controls final resolution
tau_ini = 10. # first value of conformal time in Mpc
tau_label_Hubble = 20. # value of time at which we want to place the label on Hubble crossing
tau_label_ks = 40. # value of time at which we want to place the label on sound horizon crossing
tau_label_kd = 230. # value of time at which we want to place the label on damping scale crossing
#
# Cosmological parameters and other CLASS parameters
#
common_settings = {# which output? transfer functions only
'output':'mTk',
# LambdaCDM parameters
'h':0.67556,
'omega_b':0.022032,
'omega_cdm':0.12038,
'A_s':2.215e-9,
'n_s':0.9619,
'tau_reio':0.0925,
# Take fixed value for primordial Helium (instead of automatic BBN adjustment)
'YHe':0.246,
# other output and precision parameters
'z_max_pk':z_max_pk,
'k_per_decade_for_pk':k_per_decade,
'k_per_decade_for_bao':k_per_decade,
'k_min_tau0':k_min_tau0, # this value controls the minimum k value in the figure
'perturbations_sampling_stepsize':'0.05',
'P_k_max_1/Mpc':P_k_max_inv_Mpc,
'compute damping scale':'yes', # needed to output and plot Silk damping scale
'gauge':'newtonian'}
###############
#
# call CLASS
#
###############
M = Class()
M.set(common_settings)
M.compute()
#
# define conformal time sampling array
#
times = M.get_current_derived_parameters(['tau_rec','conformal_age'])
tau_rec=times['tau_rec']
tau_0 = times['conformal_age']
tau1 = np.logspace(math.log10(tau_ini),math.log10(tau_rec),tau_num_early)
tau2 = np.logspace(math.log10(tau_rec),math.log10(tau_0),tau_num_late)[1:]
tau2[-1] *= 0.999 # this tiny shift avoids interpolation errors
tau = np.concatenate((tau1,tau2))
tau_num = len(tau)
#
# use table of background and thermodynamics quantities to define some functions
# returning some characteristic scales
# (of Hubble crossing, sound horizon crossing, etc.) at different times
#
background = M.get_background() # load background table
#print background.viewkeys()
thermodynamics = M.get_thermodynamics() # load thermodynamics table
#print thermodynamics.viewkeys()
#
background_tau = background['conf. time [Mpc]'] # read conformal times in background table
background_z = background['z'] # read redshift
background_aH = 2.*math.pi*background['H [1/Mpc]']/(1.+background['z'])/M.h() # read 2pi * aH in [h/Mpc]
background_ks = 2.*math.pi/background['comov.snd.hrz.']/M.h() # read 2pi/(comoving sound horizon) in [h/Mpc]
background_rho_m_over_r = (background['(.)rho_b']+background['(.)rho_cdm']) /(background['(.)rho_g']+background['(.)rho_ur']) # read rho_m / rho_r (to find time of equality)
background_rho_l_over_m = background['(.)rho_lambda'] /(background['(.)rho_b']+background['(.)rho_cdm']) # read rho_lambda / rho_m (to find time of equality)
thermodynamics_tau = thermodynamics['conf. time [Mpc]'] # read conformal times in thermodynamics table
thermodynamics_kd = 2.*math.pi/thermodynamics['r_d']/M.h() # read 2pi/(comoving diffusion scale) in [h/Mpc]
#
# define a bunch of interpolation functions based on previous quantities
#
background_z_at_tau = interp1d(background_tau,background_z)
background_aH_at_tau = interp1d(background_tau,background_aH)
background_ks_at_tau = interp1d(background_tau,background_ks)
background_tau_at_mr = interp1d(background_rho_m_over_r,background_tau)
background_tau_at_lm = interp1d(background_rho_l_over_m,background_tau)
thermodynamics_kd_at_tau = interp1d(thermodynamics_tau, thermodynamics_kd)
#
# infer arrays of characteristic quantities calculated at values of conformal time in tau array
#
aH = background_aH_at_tau(tau)
ks = background_ks_at_tau(tau)
kd = thermodynamics_kd_at_tau(tau)
#
# infer times of R/M and M/Lambda equalities
#
tau_eq = background_tau_at_mr(1.)
tau_lambda = background_tau_at_lm(1.)
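#
# illustrative printout (not part of the original script): for these parameters the
# equalities occur at roughly tau_eq = O(10^2) Mpc and tau_lambda = O(10^4) Mpc
#
print ('tau_eq =',tau_eq,'Mpc, tau_lambda =',tau_lambda,'Mpc')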
#
# check and inform user whether initial arbitrary choice of z_max_pk was OK
max_z_needed = background_z_at_tau(tau[0])
if max_z_needed > z_max_pk:
    print ('you must increase the value of z_max_pk to at least ',max_z_needed)
    raise ValueError('z_max_pk is too small') # stop the script execution here
else:
print ('in a next run with the same values of tau, you may decrease z_max_pk from ',z_max_pk,' to ',max_z_needed)
#
# get transfer functions at each time and build arrays Theta0(tau,k) and phi(tau,k)
#
for i in range(tau_num):
one_time = M.get_transfer(background_z_at_tau(tau[i])) # transfer functions at each time tau
if i ==0: # if this is the first time in the loop: create the arrays (k, Theta0, phi)
k = one_time['k (h/Mpc)']
k_num = len(k)
Theta0 = np.zeros((tau_num,k_num))
phi = np.zeros((tau_num,k_num))
Theta0[i,:] = 0.25*one_time['d_g'][:]
phi[i,:] = one_time['phi'][:]
#
# find the global extrema of Theta0(tau,k) and phi(tau,k), used to define color code later
#
Theta_amp = max(Theta0.max(),-Theta0.min())
phi_amp = max(phi.max(),-phi.min())
#
# reshaping of (k,tau) necessary to call the function 'pcolormesh'
#
K,T = np.meshgrid(k,tau)
#
# inform user of the size of the grids (related to the figure resolution)
#
print ('grid size:',len(k),len(tau),Theta0.shape)
#
#################
#
# start plotting
#
#################
#
fig = plt.figure(figsize=(18,8))
#
# plot Theta0(k,tau)
#
ax_Theta = fig.add_subplot(121)
print ('> Plotting Theta_0')
fig_Theta = ax_Theta.pcolormesh(K,T,Theta0,cmap='coolwarm',vmin=-Theta_amp,vmax=Theta_amp,shading='auto')
print ('> Done')
#
# plot lines (characteristic times and scales)
#
ax_Theta.axhline(y=tau_rec,color='k',linestyle='-')
ax_Theta.axhline(y=tau_eq,color='k',linestyle='-')
ax_Theta.axhline(y=tau_lambda,color='k',linestyle='-')
ax_Theta.plot(aH,tau,'r-',linewidth=2)
ax_Theta.plot(ks,tau,color='#FFFF33',linestyle='-',linewidth=2)
ax_Theta.plot(kd,tau,'b-',linewidth=2)
#
# dealing with labels
#
ax_Theta.set_title(r'$\Theta_0$')
ax_Theta.text(1.5*k[0],0.9*tau_rec,r'$\mathrm{rec.}$')
ax_Theta.text(1.5*k[0],0.9*tau_eq,r'$\mathrm{R/M} \,\, \mathrm{eq.}$')
ax_Theta.text(1.5*k[0],0.9*tau_lambda,r'$\mathrm{M/L} \,\, \mathrm{eq.}$')
ax_Theta.annotate(r'$\mathrm{Hubble} \,\, \mathrm{cross.}$',
xy=(background_aH_at_tau(tau_label_Hubble),tau_label_Hubble),
xytext=(0.1*background_aH_at_tau(tau_label_Hubble),0.8*tau_label_Hubble),
arrowprops=dict(facecolor='black', shrink=0.05, width=1, headlength=5, headwidth=5))
ax_Theta.annotate(r'$\mathrm{sound} \,\, \mathrm{horizon} \,\, \mathrm{cross.}$',
xy=(background_ks_at_tau(tau_label_ks),tau_label_ks),
xytext=(0.07*background_aH_at_tau(tau_label_ks),0.8*tau_label_ks),
arrowprops=dict(facecolor='black', shrink=0.05, width=1, headlength=5, headwidth=5))
ax_Theta.annotate(r'$\mathrm{damping} \,\, \mathrm{scale} \,\, \mathrm{cross.}$',
xy=(thermodynamics_kd_at_tau(tau_label_kd),tau_label_kd),
xytext=(0.2*thermodynamics_kd_at_tau(tau_label_kd),2.0*tau_label_kd),
arrowprops=dict(facecolor='black', shrink=0.05, width=1, headlength=5, headwidth=5))
#
# dealing with axes
#
ax_Theta.set_xlim(k[0],k[-1])
ax_Theta.set_xscale('log')
ax_Theta.set_yscale('log')
ax_Theta.set_xlabel(r'$k \,\,\, \mathrm{[h/Mpc]}$')
ax_Theta.set_ylabel(r'$\tau \,\,\, \mathrm{[Mpc]}$')
ax_Theta.invert_yaxis()
#
# color legend
#
fig.colorbar(fig_Theta)
#
# plot phi(k,tau)
#
ax_phi = fig.add_subplot(122)
ax_phi.set_xlim(k[0],k[-1])
#ax_phi.pcolor(K,T,phi,cmap='coolwarm')
print ('> Plotting phi')
fig_phi = ax_phi.pcolormesh(K,T,phi,cmap='coolwarm',vmin=-0.,vmax=phi_amp,shading='auto')
print ('> Done')
#
# plot lines (characteristic times and scales)
#
ax_phi.axhline(y=tau_rec,color='k',linestyle='-')
ax_phi.axhline(y=tau_eq,color='k',linestyle='-')
ax_phi.axhline(y=tau_lambda,color='k',linestyle='-')
ax_phi.plot(aH,tau,'r-',linewidth=2)
ax_phi.plot(ks,tau,color='#FFFF33',linestyle='-',linewidth=2)
#
# dealing with labels
#
ax_phi.set_title(r'$\phi$')
ax_phi.text(1.5*k[0],0.9*tau_rec,r'$\mathrm{rec.}$')
ax_phi.text(1.5*k[0],0.9*tau_eq,r'$\mathrm{R/M} \,\, \mathrm{eq.}$')
ax_phi.text(1.5*k[0],0.9*tau_lambda,r'$\mathrm{M/L} \,\, \mathrm{eq.}$')
ax_phi.annotate(r'$\mathrm{Hubble} \,\, \mathrm{cross.}$',
xy=(background_aH_at_tau(tau_label_Hubble),tau_label_Hubble),
xytext=(0.1*background_aH_at_tau(tau_label_Hubble),0.8*tau_label_Hubble),
arrowprops=dict(facecolor='black', shrink=0.05, width=1, headlength=5, headwidth=5))
ax_phi.annotate(r'$\mathrm{sound} \,\, \mathrm{horizon} \,\, \mathrm{cross.}$',
xy=(background_ks_at_tau(tau_label_ks),tau_label_ks),
xytext=(0.07*background_aH_at_tau(tau_label_ks),0.8*tau_label_ks),
arrowprops=dict(facecolor='black', shrink=0.05, width=1, headlength=5, headwidth=5))
#
# dealing with axes
#
ax_phi.set_xscale('log')
ax_phi.set_yscale('log')
ax_phi.set_xlabel(r'$k \,\,\, \mathrm{[h/Mpc]}$')
ax_phi.set_ylabel(r'$\tau \,\,\, \mathrm{[Mpc]}$')
ax_phi.invert_yaxis()
#
# color legend
#
fig.colorbar(fig_phi)
#
# produce the PDF
#
#plt.show()
plt.savefig('many_times.png',dpi=300)
| 10,681 | 39.157895 | 179 | py |
class_DMDR | class_DMDR-master/scripts/distances.py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# import necessary modules
# uncomment to get plots displayed in notebook
#get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from classy import Class
# In[ ]:
font = {'size' : 20, 'family':'STIXGeneral'}
axislabelfontsize='large'
matplotlib.rc('font', **font)
matplotlib.mathtext.rcParams['legend.fontsize']='medium'
# In[ ]:
#Lambda CDM
LCDM = Class()
LCDM.set({'Omega_cdm':0.25,'Omega_b':0.05})
LCDM.compute()
# In[ ]:
#Einstein-de Sitter
CDM = Class()
CDM.set({'Omega_cdm':0.95,'Omega_b':0.05})
CDM.compute()
# Just to cross-check that Omega_Lambda is negligible
# (but not exactly zero because we neglected radiation)
derived = CDM.get_current_derived_parameters(['Omega0_lambda'])
print (derived)
print ("Omega_Lambda =",derived['Omega0_lambda'])
# In[ ]:
#Get background quantities and recover their names:
baLCDM = LCDM.get_background()
baCDM = CDM.get_background()
baCDM.keys()
# In[ ]:
#Get H_0 in order to plot the distances in this unit
fLCDM = LCDM.Hubble(0)
fCDM = CDM.Hubble(0)
# In[ ]:
namelist = ['lum. dist.','comov. dist.','ang.diam.dist.']
colours = ['b','g','r']
for name in namelist:
idx = namelist.index(name)
plt.loglog(baLCDM['z'],fLCDM*baLCDM[name],colours[idx]+'-')
plt.legend(namelist,loc='upper left')
for name in namelist:
idx = namelist.index(name)
plt.loglog(baCDM['z'],fCDM*baCDM[name],colours[idx]+'--')
plt.xlim([0.07, 10])
plt.ylim([0.08, 20])
plt.xlabel(r"$z$")
plt.ylabel(r"$\mathrm{Distance}\times H_0$")
plt.tight_layout()
# In[ ]:
plt.savefig('distances.pdf')
| 1,670 | 17.566667 | 63 | py |
class_DMDR | class_DMDR-master/scripts/neutrinohierarchy.py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# import necessary modules
# uncomment to get plots displayed in notebook
#get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from classy import Class
from scipy.optimize import fsolve
# In[ ]:
# esthetic definitions for the plots
font = {'size' : 16, 'family':'STIXGeneral'}
axislabelfontsize='large'
matplotlib.rc('font', **font)
matplotlib.mathtext.rcParams['legend.fontsize']='medium'
# In[ ]:
# a function returning the three masses given the Delta m^2, the total mass, and the hierarchy (e.g. 'NH' or 'IH')
# taken from a piece of MontePython written by Thejs Brinckmann
def get_masses(delta_m_squared_atm, delta_m_squared_sol, sum_masses, hierarchy):
    # any string containing letter 'n' will be considered as referring to normal hierarchy
    if 'n' in hierarchy.lower():
        # Normal hierarchy massive neutrinos. Calculates the individual
        # neutrino masses from the total mass M_tot
#delta_m_squared_atm=2.45e-3
#delta_m_squared_sol=7.50e-5
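        # m1_func below is (M_tot - m1 - m2)^2 - m3^2 written out explicitly, with
        # m2 = sqrt(m1^2 + d_m_sq_sol) and m3^2 = d_m_sq_atm + (m1^2 + m2^2)/2,
        # so its root is the m1 satisfying m1 + m2 + m3 = M_tot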
m1_func = lambda m1, M_tot, d_m_sq_atm, d_m_sq_sol: M_tot**2. + 0.5*d_m_sq_sol - d_m_sq_atm + m1**2. - 2.*M_tot*m1 - 2.*M_tot*(d_m_sq_sol+m1**2.)**0.5 + 2.*m1*(d_m_sq_sol+m1**2.)**0.5
m1,opt_output,success,output_message = fsolve(m1_func,sum_masses/3.,(sum_masses,delta_m_squared_atm,delta_m_squared_sol),full_output=True)
m1 = m1[0]
m2 = (delta_m_squared_sol + m1**2.)**0.5
m3 = (delta_m_squared_atm + 0.5*(m2**2. + m1**2.))**0.5
return m1,m2,m3
else:
        # Inverted hierarchy massive neutrinos. Calculates the individual
        # neutrino masses from the total mass M_tot
#delta_m_squared_atm=-2.45e-3
#delta_m_squared_sol=7.50e-5
delta_m_squared_atm = -delta_m_squared_atm
m1_func = lambda m1, M_tot, d_m_sq_atm, d_m_sq_sol: M_tot**2. + 0.5*d_m_sq_sol - d_m_sq_atm + m1**2. - 2.*M_tot*m1 - 2.*M_tot*(d_m_sq_sol+m1**2.)**0.5 + 2.*m1*(d_m_sq_sol+m1**2.)**0.5
m1,opt_output,success,output_message = fsolve(m1_func,sum_masses/3.,(sum_masses,delta_m_squared_atm,delta_m_squared_sol),full_output=True)
m1 = m1[0]
m2 = (delta_m_squared_sol + m1**2.)**0.5
m3 = (delta_m_squared_atm + 0.5*(m2**2. + m1**2.))**0.5
return m1,m2,m3
# In[ ]:
# test of this function, returning the 3 masses for total mass of 0.1eV
m1,m2,m3 = get_masses(2.45e-3,7.50e-5,0.1,'NH')
print ('NH:',m1,m2,m3,m1+m2+m3)
m1,m2,m3 = get_masses(2.45e-3,7.50e-5,0.1,'IH')
print ('IH:',m1,m2,m3,m1+m2+m3)
# In[ ]:
# The goal of this cell is to compute the ratio of P(k) for NH and IH with the same total mass
commonsettings = {'N_ur':0,
'N_ncdm':3,
'output':'mPk',
'P_k_max_1/Mpc':3.0,
                  # The next line increases precision (at the cost of significantly slower running); remove it for speed
                  'ncdm_fluid_approximation':3,
                  # The next line makes Class print more info on the ncdm sector; set it to 0 to silence it:
                  'background_verbose':1
}
# array of k values in 1/Mpc
kvec = np.logspace(-4,np.log10(3),100)
# array for storing legend
legarray = []
# loop over total mass values
for sum_masses in [0.1, 0.115, 0.13]:
# normal hierarchy
[m1, m2, m3] = get_masses(2.45e-3,7.50e-5, sum_masses, 'NH')
NH = Class()
NH.set(commonsettings)
NH.set({'m_ncdm':str(m1)+','+str(m2)+','+str(m3)})
NH.compute()
# inverted hierarchy
[m1, m2, m3] = get_masses(2.45e-3,7.50e-5, sum_masses, 'IH')
IH = Class()
IH.set(commonsettings)
IH.set({'m_ncdm':str(m1)+','+str(m2)+','+str(m3)})
IH.compute()
pkNH = []
pkIH = []
for k in kvec:
pkNH.append(NH.pk(k,0.))
pkIH.append(IH.pk(k,0.))
NH.struct_cleanup()
IH.struct_cleanup()
# extract h value to convert k from 1/Mpc to h/Mpc
h = NH.h()
plt.semilogx(kvec/h,1-np.array(pkNH)/np.array(pkIH))
legarray.append(r'$\Sigma m_i = '+str(sum_masses)+'$eV')
plt.axhline(0,color='k')
plt.xlim(kvec[0]/h,kvec[-1]/h)
plt.xlabel(r'$k [h \mathrm{Mpc}^{-1}]$')
plt.ylabel(r'$1-P(k)^\mathrm{NH}/P(k)^\mathrm{IH}$')
plt.legend(legarray)
# In[ ]:
plt.savefig('neutrinohierarchy.pdf')
| 4,339 | 34 | 191 | py |
class_DMDR | class_DMDR-master/scripts/check_PPF_approx.py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
#get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from classy import Class
# In[ ]:
k_out = [5e-5, 5e-4, 5e-3]
models = ['PPF1','PPF2','FLD1','FLD1S']
w0 = {'PPF1':-0.7,'PPF2':-1.15,'FLD1':-0.7,'FLD1S':-0.7}
wa = {'PPF1':0.,'PPF2':0.5,'FLD1':0.,'FLD1S':0.}
omega_cdm = {'PPF1':0.104976,'PPF2':0.120376,'FLD1':0.104976,'FLD1S':0.104976}
omega_b = 0.022
##Omega_cdm = {'PPF1':0.26,'PPF2':0.21,'FLD1':0.26,'FLD1S':0.26}
##Omega_b = 0.05
h = {'PPF1':0.64,'PPF2':0.74,'FLD1':0.64,'FLD1S':0.64}
cosmo = {}
for M in models:
use_ppf = 'yes'
gauge = 'Newtonian'
if 'FLD' in M:
use_ppf = 'no'
if 'S' in M:
gauge = 'Synchronous'
cosmo[M] = Class()
cosmo[M].set({'output':'tCl mPk dTk vTk','k_output_values':str(k_out).strip('[]'),
'h':h[M],
'omega_b':omega_b,'omega_cdm':omega_cdm[M],
##'Omega_b':Omega_b,'omega_cdm':Omega_cdm[M],
'cs2_fld':1.,
'w0_fld':w0[M],'wa_fld':wa[M],'Omega_Lambda':0.,'gauge':gauge,
'use_ppf':use_ppf})
cosmo[M].compute()
# In[ ]:
colours = ['r','k','g','m']
for i,M in enumerate(models):
cl = cosmo[M].raw_cl()
l = cl['ell']
plt.loglog(l,cl['tt']*l*(l+1)/(2.*np.pi),label=M,color=colours[i])
plt.legend(loc='upper left')
plt.xlim([2,300])
plt.ylim([6e-11,1e-9])
plt.xlabel(r'$\ell$')
plt.ylabel(r'$[\ell(\ell+1)/2\pi] C_\ell^\mathrm{TT}$')
plt.savefig('check_PPF_clTT.pdf')
# In[ ]:
for M in ['PPF1','FLD1']:
csm = cosmo[M]
pt = csm.get_perturbations()
pts = pt['scalar']
for i,k in enumerate(k_out):
ptk = pts[i]
a = ptk['a']
phi = ptk['phi']
psi = ptk['psi']
if 'FLD' in M:
ls = ':'
lw=5
else:
ls = '-'
lw=1
plt.semilogx(a,0.5*(phi+psi),label=M+' '+'$k='+str(k)+'Mpc^{-1}$',ls=ls,lw=lw)
plt.legend(loc='lower left')
plt.xlim([1e-2,1])
plt.ylim([0.3,0.63])
plt.xlabel(r'$a/a_0$')
plt.ylabel(r'$\frac{1}{2} ~(\Phi+\Psi)$')
plt.savefig('check_PPF_metric.pdf')
# In[ ]:
#kminclosed = sqrt(-8*Omega_k)*(70/3e5) Mpc^(-1)
k_out = [1e-3] #[1e-4, 1e-3, 1e-2]
#models = ['PPF1','PPF2','FLD1']
models = ['PPF1','FLD1']
w0 = {'PPF1':-0.7,'PPF2':-1.15,'FLD1':-0.7,'FLD1S':-0.7}
wa = {'PPF1':0.,'PPF2':0.5,'FLD1':0.,'FLD1S':0.}
omega_cdm = {'PPF1':0.104976,'PPF2':0.120376,'FLD1':0.104976,'FLD1S':0.104976}
omega_b = 0.022
##Omega_cdm = {'PPF1':0.26,'PPF2':0.21,'FLD1':0.26,'FLD1S':0.26}
##Omega_b = 0.05
h = {'PPF1':0.64,'PPF2':0.74,'FLD1':0.64}
fig, axes = plt.subplots(1,2,figsize=(16,5))
for Omega_K in [-0.1, 0.0, 0.15]:
for gauge in ['Synchronous','Newtonian']:
cosmo = {}
for M in models:
use_ppf = 'yes'
if 'FLD' in M:
use_ppf = 'no'
cosmo[M] = Class()
cosmo[M].set({'output':'tCl mPk dTk vTk','k_output_values':str(k_out).strip('[]'),
'h':h[M],
'omega_b':omega_b,'omega_cdm':omega_cdm[M],'Omega_k':Omega_K,
##'Omega_b':Omega_b,'omega_cdm':Omega_cdm[M],
'cs2_fld':1.,
'w0_fld':w0[M],'wa_fld':wa[M],'Omega_Lambda':0.,'gauge':gauge,
'use_ppf':use_ppf,'hyper_sampling_curved_low_nu':10.0})
cosmo[M].compute()
label = r'$\Omega_k='+str(Omega_K)+'$, '+gauge[0]
clfld = cosmo['FLD1'].raw_cl()
clppf = cosmo['PPF1'].raw_cl()
axes[0].semilogx(clfld['ell'][2:],clppf['tt'][2:]/clfld['tt'][2:],label=label)
ptfld = cosmo['FLD1'].get_perturbations()['scalar']
ptppf = cosmo['PPF1'].get_perturbations()['scalar']
for i,k in enumerate(k_out):
ptkfld = ptfld[i]
a = ptkfld['a']
phi_plus_phi_fld = ptkfld['phi']+ptkfld['psi']
ptkppf = ptppf[i]
phi_plus_phi_ppf = ptkppf['phi']+ptkppf['psi']
axes[1].semilogx(ptkppf['a'],phi_plus_phi_ppf,label=label+'_ppf')
axes[1].semilogx(ptkfld['a'],phi_plus_phi_fld,label=label+'_fld')
print (len(ptkppf['a']),len(ptkfld['a']))
axes[0].legend(loc='lower left',ncol=2)
axes[0].set_xlim([2,300])
axes[0].set_ylim([0.98,1.02])
axes[0].set_xlabel(r'$\ell$')
axes[0].set_ylabel(r'$C_\ell^\mathrm{FLD1}/C_\ell^\mathrm{PPF1}$')
axes[1].legend(loc='lower left',ncol=2)
axes[1].set_xlim([1e-2,1])
axes[1].set_xlabel(r'$a/a_0$')
axes[1].set_ylabel(r'$(\Phi+\Psi)$')
fig.savefig('check_PPF_Omegak.pdf')
# In[ ]:
colours = ['r','k','g','m']
k_out = [1e-1] #[1e-4, 1e-3, 1e-2]
#models = ['PPF1','PPF2','FLD1']
models = ['PPF1','FLD1']
w0 = {'PPF1':-0.7,'PPF2':-1.15,'FLD1':-0.7,'FLD1S':-0.7}
wa = {'PPF1':0.,'PPF2':0.5,'FLD1':0.,'FLD1S':0.}
omega_cdm = {'PPF1':0.104976,'PPF2':0.120376,'FLD1':0.104976,'FLD1S':0.104976}
omega_b = 0.022
##Omega_cdm = {'PPF1':0.26,'PPF2':0.21,'FLD1':0.26,'FLD1S':0.26}
##Omega_b = 0.05
h = {'PPF1':0.64,'PPF2':0.74,'FLD1':0.64}
fig, axes = plt.subplots(1,2,figsize=(18,8))
for Omega_K in [-0.1, 0.0, 0.15]:
for ppfgauge in ['Synchronous','Newtonian']:
cosmo = {}
for M in models:
use_ppf = 'yes'
gauge = ppfgauge
if 'FLD' in M:
use_ppf = 'no'
cosmo[M] = Class()
cosmo[M].set({'output':'tCl mPk dTk vTk','k_output_values':str(k_out).strip('[]'),
'h':h[M],
'omega_b':omega_b,'omega_cdm':omega_cdm[M],'Omega_k':Omega_K,
##'Omega_b':Omega_b,'omega_cdm':Omega_cdm[M],
'cs2_fld':1.,
'w0_fld':w0[M],'wa_fld':wa[M],'Omega_Lambda':0.,'gauge':gauge,
'use_ppf':use_ppf,'hyper_sampling_curved_low_nu':6.1})
cosmo[M].compute()
#fig, axes = plt.subplots(1,2,figsize=(16,5))
for j,M in enumerate(models):
cl = cosmo[M].raw_cl()
l = cl['ell']
label = M+r'$\Omega_k='+str(Omega_K)+'$, '+gauge[0]
axes[0].loglog(l,cl['tt']*l*(l+1)/(2.*np.pi),label=label,color=colours[j])
csm = cosmo[M]
pt = csm.get_perturbations()
pts = pt['scalar']
for i,k in enumerate(k_out):
ptk = pts[i]
a = ptk['a']
phi = ptk['phi']
psi = ptk['psi']
if 'FLD' in M:
ls = ':'
lw=5
else:
ls = '-'
lw=1
axes[1].semilogx(a,0.5*abs(phi+psi),label=label+' '+'$k='+str(k)+'Mpc^{-1}$',ls=ls,lw=lw)
axes[0].legend(loc='upper left')
axes[0].set_xlim([2,300])
axes[0].set_ylim([6e-11,1e-9])
axes[0].set_xlabel(r'$\ell$')
axes[0].set_ylabel(r'$[\ell(\ell+1)/2\pi] C_\ell^\mathrm{TT}$')
axes[1].legend(loc='upper right')
#axes[1].set_xlim([1e-2,1])
#axes[1].set_ylim([0.3,0.63])
axes[1].set_xlabel(r'$a/a_0$')
axes[1].set_ylabel(r'$\frac{1}{2}~(\Phi+\Psi)$')
fig.savefig('check_PPF_Omegak2.pdf')
# In[ ]:
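# these two checks recover the omega_cdm values used above,
# omega_cdm = Omega_m*h^2 - omega_b, with (Omega_m,h) = (0.31,0.64) for PPF1/FLD1
# and (0.26,0.74) for PPF2: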
print (0.31*0.64**2-0.022)
print (0.26*0.74**2-0.022)
| 7,163 | 28.240816 | 105 | py |
class_DMDR | class_DMDR-master/scripts/warmup.py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# import classy module
from classy import Class
# In[ ]:
# create instance of the class "Class"
LambdaCDM = Class()
# pass input parameters
LambdaCDM.set({'omega_b':0.0223828,'omega_cdm':0.1201075,'h':0.67810,'A_s':2.100549e-09,'n_s':0.9660499,'tau_reio':0.05430842})
LambdaCDM.set({'output':'tCl,pCl,lCl,mPk','lensing':'yes','P_k_max_1/Mpc':3.0})
# run class
LambdaCDM.compute()
# In[ ]:
# get all C_l output
cls = LambdaCDM.lensed_cl(2500)
# To check the format of cls
cls.keys()
# In[ ]:
ll = cls['ell'][2:]
clTT = cls['tt'][2:]
clEE = cls['ee'][2:]
clPP = cls['pp'][2:]
# In[ ]:
# uncomment to get plots displayed in notebook
#get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
from math import pi
# In[ ]:
# plot C_l^TT
plt.figure(1)
plt.xscale('log');plt.yscale('linear');plt.xlim(2,2500)
plt.xlabel(r'$\ell$')
plt.ylabel(r'$[\ell(\ell+1)/2\pi] C_\ell^\mathrm{TT}$')
plt.plot(ll,clTT*ll*(ll+1)/2./pi,'r-')
# In[ ]:
plt.savefig('warmup_cltt.pdf')
# In[ ]:
# get P(k) at redshift z=0
import numpy as np
kk = np.logspace(-4,np.log10(3),1000) # k in h/Mpc
Pk = [] # P(k) in (Mpc/h)**3
h = LambdaCDM.h() # get reduced Hubble for conversions to 1/Mpc
for k in kk:
Pk.append(LambdaCDM.pk(k*h,0.)*h**3) # function .pk(k,z)
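# note: CLASS returns P(k) in Mpc^3 for k in 1/Mpc; evaluating at k*h and
# multiplying by h^3 converts the result to the conventional (Mpc/h)^3 versus k in h/Mpc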
# In[ ]:
# plot P(k)
plt.figure(2)
plt.xscale('log');plt.yscale('log');plt.xlim(kk[0],kk[-1])
plt.xlabel(r'$k \,\,\,\, [h/\mathrm{Mpc}]$')
plt.ylabel(r'$P(k) \,\,\,\, [\mathrm{Mpc}/h]^3$')
plt.plot(kk,Pk,'b-')
# In[ ]:
plt.savefig('warmup_pk.pdf')
# In[ ]:
# optional: reset parameters to default in case you want
# to set different parameters and rerun LambdaCDM.compute()
LambdaCDM.empty()
| 1,742 | 16.088235 | 127 | py |
class_DMDR | class_DMDR-master/scripts/one_time.py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# import necessary modules
from classy import Class
from math import pi
# In[ ]:
#####################################################
#
# Cosmological parameters and other CLASS parameters
#
#####################################################
common_settings = {# LambdaCDM parameters
'h':0.67810,
'omega_b':0.02238280,
'omega_cdm':0.12038,
'A_s':2.100549e-09,
'n_s': 0.9660499,
'tau_reio':0.05430842,
# output and precision parameters
'output':'tCl,mTk,vTk',
'l_max_scalars':5000,
'P_k_max_1/Mpc':10.0,
'gauge':'newtonian'
}
# In[ ]:
###############
#
# call CLASS a first time just to compute z_rec (will compute transfer functions at default: z=0)
#
###############
M = Class()
M.set(common_settings)
M.compute()
derived = M.get_current_derived_parameters(['z_rec','tau_rec','conformal_age'])
print (derived.keys())
z_rec = derived['z_rec']
z_rec = int(1000.*z_rec)/1000. # truncate at 3 digits after the comma
print ('z_rec=',z_rec)
#
# In the last figure the x-axis will show l/(tau_0-tau_rec), so we need (tau_0-tau_rec) in units of [Mpc/h]
#
tau_0_minus_tau_rec_hMpc = (derived['conformal_age']-derived['tau_rec'])*M.h()
# In[ ]:
################
#
# call CLASS again for the perturbations (will compute transfer functions at input value z_rec)
#
################
M.empty() # reset input parameters to default, before passing a new parameter set
M.set(common_settings)
M.set({'z_pk':z_rec})
M.compute()
#
# save the total Cl's (we will plot them in the last step)
#
cl_tot = M.raw_cl(5000)
#
#
# load transfer functions at recombination
#
one_time = M.get_transfer(z_rec)
print (one_time.keys())
k = one_time['k (h/Mpc)']
Theta0 = 0.25*one_time['d_g']
phi = one_time['phi']
psi = one_time['psi']
theta_b = one_time['t_b']
# compute related quantities
R = 3./4.*M.Omega_b()/M.Omega_g()/(1+z_rec) # R = 3/4 * (rho_b/rho_gamma) at z_rec
zero_point = -(1.+R)*psi # zero point of oscillations: -(1.+R)*psi
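# (in the tight-coupling regime the monopole Theta0 oscillates around -(1+R)*psi,
# so the effective temperature Theta0+psi oscillates around -R*psi: the baryon
# loading R shifts the zero point of the acoustic oscillations)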
Theta0_amp = max(Theta0.max(),-Theta0.min()) # Theta0 oscillation amplitude (for vertical scale of plot)
print ('At z_rec: R=',R,', Theta0_amp=',Theta0_amp)
# In[ ]:
# use table of background quantities to find the wavenumbers corresponding to
# Hubble crossing (k = 2 pi a H), sound horizon crossing (k = 2pi / rs)
#
background = M.get_background() # load background table
print (background.keys())
#
background_tau = background['conf. time [Mpc]'] # read conformal times in background table
background_z = background['z'] # read redshift
background_kh = 2.*pi*background['H [1/Mpc]']/(1.+background['z'])/M.h() # read kh = 2pi aH = 2pi H/(1+z) converted to [h/Mpc]
background_ks = 2.*pi/background['comov.snd.hrz.']/M.h() # read ks = 2pi/rs converted to [h/Mpc]
#
# define interpolation functions; we want the value of these scales at tau = tau_rec
#
from scipy.interpolate import interp1d
kh_at_tau = interp1d(background_tau,background_kh)
ks_at_tau = interp1d(background_tau,background_ks)
#
# finally get these scales
#
tau_rec = derived['tau_rec']
kh = kh_at_tau(tau_rec)
ks = ks_at_tau(tau_rec)
print ('at tau_rec=',tau_rec,', kh=',kh,', ks=',ks)
# In[ ]:
#####################
#
# call CLASS with TSW (intrinsic temperature + Sachs-Wolfe) and save
#
#####################
M.empty() # clean input
M.set(common_settings) # new input
M.set({'temperature contributions':'tsw'})
M.compute()
cl_TSW = M.raw_cl(5000)
# In[ ]:
######################
#
# call CLASS with early ISW and save
#
######################
M.empty()
M.set(common_settings)
M.set({'temperature contributions':'eisw'})
M.compute()
cl_eISW = M.raw_cl(5000)
# In[ ]:
######################
#
# call CLASS with late ISW and save
#
######################
M.empty()
M.set(common_settings)
M.set({'temperature contributions':'lisw'})
M.compute()
cl_lISW = M.raw_cl(5000)
# In[ ]:
######################
#
# call CLASS with Doppler and save
#
######################
M.empty()
M.set(common_settings)
M.set({'temperature contributions':'dop'})
M.compute()
cl_Doppler = M.raw_cl(5000)
# In[ ]:
# modules and esthetic definitions for the plots
#
# uncomment to get plots displayed in notebook
#get_ipython().run_line_magic('matplotlib', 'inline')
#
import matplotlib
import matplotlib.pyplot as plt
#
font = {'size' : 16, 'family':'STIXGeneral'}
axislabelfontsize='large'
matplotlib.rc('font', **font)
matplotlib.mathtext.rcParams['legend.fontsize']='medium'
plt.rcParams["figure.figsize"] = [8.0,6.0]
# In[ ]:
#################
#
# start plotting
#
#################
#
fig, (ax_Tk, ax_Tk2, ax_Cl) = plt.subplots(3,sharex=True,figsize=(8,12))
fig.subplots_adjust(hspace=0)
##################
#
# first figure with transfer functions
#
##################
ax_Tk.set_xlim([3.e-4,0.5])
ax_Tk.set_ylim([-1.1*Theta0_amp,1.1*Theta0_amp])
ax_Tk.tick_params(axis='x',which='both',bottom='off',top='on',labelbottom='off',labeltop='on')
ax_Tk.set_xlabel(r'$\mathrm{k} \,\,\, \mathrm{[h/Mpc]}$')
ax_Tk.set_ylabel(r'$\mathrm{Transfer}(\tau_\mathrm{dec},k)$')
ax_Tk.xaxis.set_label_position('top')
ax_Tk.grid()
#
ax_Tk.axvline(x=kh,color='r')
ax_Tk.axvline(x=ks,color='y')
#
ax_Tk.annotate(r'Hubble cross.',
xy=(kh,0.8*Theta0_amp),
xytext=(0.15*kh,0.9*Theta0_amp),
arrowprops=dict(facecolor='black', shrink=0.05, width=1, headlength=5, headwidth=5))
ax_Tk.annotate(r'sound hor. cross.',
xy=(ks,0.8*Theta0_amp),
xytext=(1.3*ks,0.9*Theta0_amp),
arrowprops=dict(facecolor='black', shrink=0.05, width=1, headlength=5, headwidth=5))
#
ax_Tk.semilogx(k,psi,'y-',label=r'$\psi$')
ax_Tk.semilogx(k,phi,'r-',label=r'$\phi$')
ax_Tk.semilogx(k,zero_point,'k:',label=r'$-(1+R)\psi$')
ax_Tk.semilogx(k,Theta0,'b-',label=r'$\Theta_0$')
ax_Tk.semilogx(k,(Theta0+psi),'c',label=r'$\Theta_0+\psi$')
ax_Tk.semilogx(k,theta_b,'g-',label=r'$\theta_b$')
#
ax_Tk.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
#######################
#
# second figure with transfer functions squared
#
#######################
ax_Tk2.set_xlim([3.e-4,0.5])
ax_Tk2.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off',labeltop='off')
ax_Tk2.set_ylabel(r'$\mathrm{Transfer}(\tau_\mathrm{dec},k)^2$')
ax_Tk2.grid()
#
ax_Tk2.semilogx(k,(Theta0+psi)**2,'c',label=r'$(\Theta_0+\psi)^2$')
#
ax_Tk2.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
########################
#
# third figure with all contributions to Cls
#
# We already computed each contribution (TSW, earlyISW, lateISW, Doppler, total)
# Note that there is another contribution from polarisation. We don't plot it because it is
# too small to be seen, however it is included by default in the total.
#
# After each step we will save the figure (to get intermediate figures that can be used in slides)
#
#########################
# presentation settings
ax_Cl.set_xlim([3.e-4,0.5])
ax_Cl.set_ylim([0.,8.])
ax_Cl.set_xlabel(r'$\ell/(\tau_0-\tau_{rec}) \,\,\, \mathrm{[h/Mpc]}$')
ax_Cl.set_ylabel(r'$\ell (\ell+1) C_l^{TT} / 2 \pi \,\,\, [\times 10^{10}]$')
ax_Cl.tick_params(axis='x',which='both',bottom='on',top='off',labelbottom='on',labeltop='off')
ax_Cl.grid()
#
# plot and save with TSW
#
ax_Cl.semilogx(cl_TSW['ell']/tau_0_minus_tau_rec_hMpc,1.e10*cl_TSW['ell']*(cl_TSW['ell']+1.)*cl_TSW['tt']/2./pi,'c-',label=r'$\mathrm{T+SW}$')
#
ax_Cl.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
fig.savefig('one_time_with_cl_1.pdf',bbox_inches='tight')
#
# plot and save with additionally early ISW and late ISW
#
ax_Cl.semilogx(cl_eISW['ell']/tau_0_minus_tau_rec_hMpc,1.e10*cl_eISW['ell']*(cl_eISW['ell']+1.)*cl_eISW['tt']/2./pi,'r-',label=r'$\mathrm{early} \,\, \mathrm{ISW}$')
ax_Cl.semilogx(cl_lISW['ell']/tau_0_minus_tau_rec_hMpc,1.e10*cl_lISW['ell']*(cl_lISW['ell']+1.)*cl_lISW['tt']/2./pi,'y-',label=r'$\mathrm{late} \,\, \mathrm{ISW}$')
#
ax_Cl.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
fig.savefig('one_time_with_cl_2.pdf',bbox_inches='tight')
#
# plot and save with additionally Doppler
#
ax_Cl.semilogx(cl_Doppler['ell']/tau_0_minus_tau_rec_hMpc,1.e10*cl_Doppler['ell']*(cl_Doppler['ell']+1.)*cl_Doppler['tt']/2./pi,'g-',label=r'$\mathrm{Doppler}$')
#
ax_Cl.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
fig.savefig('one_time_with_cl_3.pdf',bbox_inches='tight')
#
# plot and save with additionally total Cls
#
ax_Cl.semilogx(cl_tot['ell']/tau_0_minus_tau_rec_hMpc,1.e10*cl_tot['ell']*(cl_tot['ell']+1.)*cl_tot['tt']/2./pi,'k-',label=r'$\mathrm{Total}$')
#
ax_Cl.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
fig.savefig('one_time_with_cl_tot.pdf',bbox_inches='tight')
| 8,830 | 28.33887 | 165 | py |
class_DMDR | class_DMDR-master/scripts/one_k.py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# import necessary modules
# uncomment to get plots displayed in notebook
#get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from classy import Class
from scipy.optimize import fsolve
from scipy.interpolate import interp1d
import math
# In[ ]:
# esthetic definitions for the plots
font = {'size' : 16, 'family':'STIXGeneral'}
axislabelfontsize='large'
matplotlib.rc('font', **font)
matplotlib.mathtext.rcParams['legend.fontsize']='medium'
plt.rcParams["figure.figsize"] = [8.0,6.0]
# In[ ]:
#############################################
#
# value of k that we want to follow in [1/Mpc]
#
k = 0.5 # 1/Mpc
#
# Cosmological parameters and other CLASS parameters
#
common_settings = {# we need to set the output field to something although
                   # the really relevant output here will be set with 'k_output_values'
                   'output':'mPk',
                   # value of k we want to plot in [1/Mpc]
'k_output_values':k,
# LambdaCDM parameters
'h':0.67810,
'omega_b':0.02238280,
'omega_cdm':0.1201075,
'A_s':2.100549e-09 ,
'n_s':0.9660499,
'tau_reio':0.05430842,
# Take fixed value for primordial Helium (instead of automatic BBN adjustment)
'YHe':0.2454,
# other options and settings
'compute damping scale':'yes', # needed to output the time of damping scale crossing
'gauge':'newtonian'}
##############
#
# call CLASS
#
M = Class()
M.set(common_settings)
M.compute()
#
# load perturbations
#
all_k = M.get_perturbations() # this potentially constains scalars/tensors and all k values
print (all_k['scalar'][0].keys())
#
one_k = all_k['scalar'][0] # this contains only the scalar perturbations for the requested k values
#
tau = one_k['tau [Mpc]']
Theta0 = 0.25*one_k['delta_g']
phi = one_k['phi']
psi = one_k['psi']
theta_b = one_k['theta_b']
a = one_k['a']
# compute related quantities
R = 3./4.*M.Omega_b()/M.Omega_g()*a # R = 3/4 * (rho_b/rho_gamma)
zero_point = -(1.+R)*psi # zero point of oscillations: -(1.+R)*psi
#
# get Theta0 oscillation amplitude (for vertical scale of plot)
#
Theta0_amp = max(Theta0.max(),-Theta0.min())
#
# get the time of decoupling
#
quantities = M.get_current_derived_parameters(['tau_rec'])
# print (quantities.keys())
tau_rec = quantities['tau_rec']
#
# use table of background quantitites to find the time of
# Hubble crossing (k / (aH)= 2 pi), sound horizon crossing (k * rs = 2pi)
#
background = M.get_background() # load background table
#print background.viewkeys()
#
background_tau = background['conf. time [Mpc]'] # read conformal times in background table
background_z = background['z'] # read redshift
background_k_over_aH = k/background['H [1/Mpc]']*(1.+background['z']) # read k/aH = k(1+z)/H
background_k_rs = k * background['comov.snd.hrz.'] # read k * rs
background_rho_m_over_r = (background['(.)rho_b']+background['(.)rho_cdm']) /(background['(.)rho_g']+background['(.)rho_ur']) # read rho_m / rho_r (to find time of equality)
#
# define interpolation functions; we want the value of tau when the argument is equal to 2pi (or 1 for equality)
#
tau_at_k_over_aH = interp1d(background_k_over_aH,background_tau)
tau_at_k_rs = interp1d(background_k_rs,background_tau)
tau_at_rho_m_over_r = interp1d(background_rho_m_over_r,background_tau)
#
# finally get these times
#
tau_Hubble = tau_at_k_over_aH(2.*math.pi)
tau_s = tau_at_k_rs(2.*math.pi)
tau_eq = tau_at_rho_m_over_r(1.)
#
#################
#
# start plotting
#
#################
#
plt.xlim([tau[0],tau_rec*1.3])
plt.ylim([-1.3*Theta0_amp,1.3*Theta0_amp])
plt.xlabel(r'$\tau \,\,\, \mathrm{[Mpc]}$')
plt.title(r'$\mathrm{Transfer} (\tau,k) \,\,\, \mathrm{for} \,\,\, k=%g \,\,\, [1/\mathrm{Mpc}]$'%k)
plt.grid()
#
plt.axvline(x=tau_Hubble,color='r')
plt.axvline(x=tau_s,color='y')
plt.axvline(x=tau_eq,color='k')
plt.axvline(x=tau_rec,color='k')
#
plt.annotate(r'Hubble cross.',
xy=(tau_Hubble,1.08*Theta0_amp),
xytext=(0.15*tau_Hubble,1.18*Theta0_amp),
arrowprops=dict(facecolor='black', shrink=0.05, width=1, headlength=5, headwidth=5))
plt.annotate(r'sound hor. cross.',
xy=(tau_s,-1.0*Theta0_amp),
xytext=(1.5*tau_s,-1.2*Theta0_amp),
arrowprops=dict(facecolor='black', shrink=0.05, width=1, headlength=5, headwidth=5))
plt.annotate(r'eq.',
xy=(tau_eq,1.08*Theta0_amp),
xytext=(0.45*tau_eq,1.18*Theta0_amp),
arrowprops=dict(facecolor='black', shrink=0.05, width=1, headlength=5, headwidth=5))
plt.annotate(r'rec.',
xy=(tau_rec,1.08*Theta0_amp),
xytext=(0.45*tau_rec,1.18*Theta0_amp),
arrowprops=dict(facecolor='black', shrink=0.05, width=1, headlength=5, headwidth=5))
#
# Possibility to add functions one by one, saving between each (for slides)
#
plt.semilogx(tau,psi,'y-',label=r'$\psi$')
#plt.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
#plt.savefig('one_k_1.pdf',bbox_inches='tight')
#
plt.semilogx(tau,phi,'r-',label=r'$\phi$')
#plt.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
#plt.savefig('one_k_2.pdf',bbox_inches='tight')
#
plt.semilogx(tau,zero_point,'k:',label=r'$-(1+R)\psi$')
#plt.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
#plt.savefig('one_k_3.pdf',bbox_inches='tight')
#
plt.semilogx(tau,Theta0,'b-',linewidth=2,label=r'$\Theta_0$')
#plt.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
#plt.savefig('one_k_4.pdf',bbox_inches='tight')
#
plt.semilogx(tau,Theta0+psi,'c-',linewidth=2,label=r'$\Theta_0+\psi$')
#plt.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
#plt.savefig('one_k_5.pdf',bbox_inches='tight')
#
plt.semilogx(tau,theta_b,'g-',label=r'$\theta_b$')
plt.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
plt.savefig('one_k.pdf',bbox_inches='tight')
#
| 6,113 | 33.542373 | 179 | py |
class_DMDR | class_DMDR-master/scripts/cl_ST.py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# import necessary modules
from classy import Class
from math import pi
# In[ ]:
#####################################################
#
# Cosmological parameters and other CLASS parameters
#
#####################################################
common_settings = {# LambdaCDM parameters
'h':0.67810,
'omega_b':0.02238280,
'omega_cdm': 0.1201075,
'A_s':2.100549e-09,
'tau_reio': 0.05430842}
l_max_scalars = 3000
l_max_tensors = 600
# Note that for l_max_tensors = 600 we can keep default precision,
# while for l_max_tensors = 3000 we would need to import many high precision settings from the file cl_ref.pre
# In[ ]:
###############
#
# call CLASS : scalars only
#
###############
#
M = Class()
M.set(common_settings)
M.set({'output':'tCl,pCl','modes':'s','lensing':'no','n_s':0.9660499,
'l_max_scalars':l_max_scalars})
M.compute()
cls = M.raw_cl(l_max_scalars)
# In[ ]:
###############
#
# call CLASS : tensors only
#
###############
#
M.empty() # reset input parameters to default, before passing a new parameter set
M.set(common_settings)
M.set({'output':'tCl,pCl','modes':'t','lensing':'no','r':0.1,'n_t':0,
'l_max_tensors':l_max_tensors})
M.compute()
clt = M.raw_cl(l_max_tensors)
# In[ ]:
###############
#
# call CLASS : scalars + tensors (only in this case we can get the correct lensed ClBB)
#
###############
#
M.empty() # reset input parameters to default, before passing a new parameter set
M.set(common_settings)
M.set({'output':'tCl,pCl,lCl','modes':'s,t','lensing':'yes','n_s':0.9660499,'r':0.1,'n_t':0,
'l_max_scalars':l_max_scalars,'l_max_tensors':l_max_tensors})
M.compute()
cl_tot = M.raw_cl(l_max_scalars)
cl_lensed = M.lensed_cl(l_max_scalars)
# In[ ]:
# modules and esthetic definitions for the plots
#
# uncomment to get plots displayed in notebook
#get_ipython().run_line_magic('matplotlib', 'inline')
#
import matplotlib
import matplotlib.pyplot as plt
#
font = {'size' : 16, 'family':'STIXGeneral'}
axislabelfontsize='large'
matplotlib.rc('font', **font)
matplotlib.mathtext.rcParams['legend.fontsize']='medium'
plt.rcParams["figure.figsize"] = [8.0,6.0]
# In[ ]:
#################
#
# plotting
#
#################
#
plt.xlim([2,l_max_scalars])
plt.ylim([1.e-8,10])
plt.xlabel(r"$\ell$")
plt.ylabel(r"$\ell (\ell+1) C_l^{XY} / 2 \pi \,\,\, [\times 10^{10}]$")
plt.title(r"$r=0.1$")
plt.grid()
#
ell = cl_tot['ell']
ellt = clt['ell']
factor = 1.e10*ell*(ell+1.)/2./pi
factort = 1.e10*ellt*(ellt+1.)/2./pi
#
plt.loglog(ell,factor*cls['tt'],'r-',label=r'$\mathrm{TT(s)}$')
plt.loglog(ellt,factort*clt['tt'],'r:',label=r'$\mathrm{TT(t)}$')
plt.loglog(ell,factor*cls['ee'],'b-',label=r'$\mathrm{EE(s)}$')
plt.loglog(ellt,factort*clt['ee'],'b:',label=r'$\mathrm{EE(t)}$')
plt.loglog(ellt,factort*clt['bb'],'g:',label=r'$\mathrm{BB(t)}$')
plt.loglog(ell,factor*(cl_lensed['bb']-cl_tot['bb']),'g-',label=r'$\mathrm{BB(lensing)}$')
plt.legend(loc='right',bbox_to_anchor=(1.4, 0.5))
# In[ ]:
plt.savefig('cl_ST.pdf',bbox_inches='tight')
| 3,156 | 21.876812 | 114 | py |
class_DMDR | class_DMDR-master/scripts/varying_pann.py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# import necessary modules
# uncomment to get plots displayed in notebook
#get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from classy import Class
from scipy.optimize import fsolve
from math import pi
# In[ ]:
# esthetic definitions for the plots
font = {'size' : 16, 'family':'STIXGeneral'}
axislabelfontsize='large'
matplotlib.rc('font', **font)
matplotlib.mathtext.rcParams['legend.fontsize']='medium'
plt.rcParams["figure.figsize"] = [8.0,6.0]
# In[ ]:
############################################
#
# Varying parameter (others fixed to default)
#
# With the input syntax of class <= 2.9 we used: annihilation = 1.e-5 m^3/s/Kg
# With the new syntax this is equivalent to DM_annihilation_efficiency = 1.11e-22 m^3/s/J
# (the ratio is a factor (c/[1 m/s])**2 = 9.e16)
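# (check of the quoted ratio: 1.e-5 / c^2 with c^2 = (2.998e8 m/s)^2 ~ 8.99e16 gives ~ 1.11e-22)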
#
var_name = 'DM_annihilation_efficiency'
var_array = np.linspace(0,1.11e-22,5)
var_num = len(var_array)
var_legend = r'$p_\mathrm{ann}$'
var_figname = 'pann'
#
#############################################
#
# Fixed settings
#
common_settings = {# LambdaCDM parameters
'h':0.67556,
'omega_b':0.022032,
'omega_cdm':0.12038,
'A_s':2.215e-9,
'n_s':0.9619,
'tau_reio':0.0925,
# output and precision parameters
'output':'tCl,pCl,lCl,mPk',
'lensing':'yes',
'P_k_max_1/Mpc':3.0,
'l_switch_limber':9
}
#
# arrays for output
#
kvec = np.logspace(-4,np.log10(3),1000)
legarray = []
twopi = 2.*pi
#
# Create figures
#
fig_Pk, ax_Pk = plt.subplots()
fig_TT, ax_TT = plt.subplots()
fig_EE, ax_EE = plt.subplots()
fig_PP, ax_PP = plt.subplots()
#
M = Class()
#
# loop over varying parameter values
#
for i,var in enumerate(var_array):
#
print (' * Compute with %s=%e'%(var_name,var))
#
# deal with colors and legends
#
if i == 0:
var_color = 'k'
var_alpha = 1.
legarray.append(r'ref. $\Lambda CDM$')
else:
var_color = 'r'
var_alpha = 1.*i/(var_num-1.)
if i == var_num-1:
legarray.append(var_legend)
#
# call CLASS
#
M.set(common_settings)
M.set({var_name:var})
M.compute()
#
# get Cls
#
clM = M.lensed_cl(2500)
ll = clM['ell'][2:]
clTT = clM['tt'][2:]
clEE = clM['ee'][2:]
clPP = clM['pp'][2:]
#
# get P(k) for common k values
#
pkM = []
for k in kvec:
pkM.append(M.pk(k,0.))
#
# plot P(k)
#
ax_Pk.loglog(kvec,np.array(pkM),color=var_color,alpha=var_alpha,linestyle='-')
#
# plot C_l^TT
#
ax_TT.semilogx(ll,clTT*ll*(ll+1)/twopi,color=var_color,alpha=var_alpha,linestyle='-')
#
# plot Cl EE
#
ax_EE.loglog(ll,clEE*ll*(ll+1)/twopi,color=var_color,alpha=var_alpha,linestyle='-')
#
# plot Cl phiphi
#
ax_PP.loglog(ll,clPP*ll*(ll+1)*ll*(ll+1)/twopi,color=var_color,alpha=var_alpha,linestyle='-')
#
# reset CLASS
#
M.empty()
#
# output of P(k) figure
#
ax_Pk.set_xlim([1.e-4,3.])
ax_Pk.set_xlabel(r'$k \,\,\,\, [h/\mathrm{Mpc}]$')
ax_Pk.set_ylabel(r'$P(k) \,\,\,\, [\mathrm{Mpc}/h]^3$')
ax_Pk.legend(legarray)
fig_Pk.tight_layout()
fig_Pk.savefig('varying_%s_Pk.pdf' % var_figname)
#
# output of C_l^TT figure
#
ax_TT.set_xlim([2,2500])
ax_TT.set_xlabel(r'$\ell$')
ax_TT.set_ylabel(r'$[\ell(\ell+1)/2\pi] C_\ell^\mathrm{TT}$')
ax_TT.legend(legarray)
fig_TT.tight_layout()
fig_TT.savefig('varying_%s_cltt.pdf' % var_figname)
#
# output of C_l^EE figure
#
ax_EE.set_xlim([2,2500])
ax_EE.set_xlabel(r'$\ell$')
ax_EE.set_ylabel(r'$[\ell(\ell+1)/2\pi] C_\ell^\mathrm{EE}$')
ax_EE.legend(legarray)
fig_EE.tight_layout()
fig_EE.savefig('varying_%s_clee.pdf' % var_figname)
#
# output of C_l^pp figure
#
ax_PP.set_xlim([10,2500])
ax_PP.set_xlabel(r'$\ell$')
ax_PP.set_ylabel(r'$[\ell^2(\ell+1)^2/2\pi] C_\ell^\mathrm{\phi \phi}$')
ax_PP.legend(legarray)
fig_PP.tight_layout()
fig_PP.savefig('varying_%s_clpp.pdf' % var_figname)
| 4,181 | 23.313953 | 97 | py |
3D-Deepbox | 3D-Deepbox-master/main.py | import tensorflow as tf
import tensorflow.contrib.slim as slim
import cv2, os
import numpy as np
import time
from random import shuffle
from data_processing import *
import sys
import argparse
from tqdm import tqdm
#####
#Training setting
BIN, OVERLAP = 2, 0.1
W = 1.
ALPHA = 1.
MAX_JIT = 3
NORM_H, NORM_W = 224, 224
VEHICLES = ['Car', 'Truck', 'Van', 'Tram','Pedestrian','Cyclist']
BATCH_SIZE = 8
learning_rate = 0.0001
epochs = 50
save_path = './model/'
dims_avg = {'Cyclist': np.array([ 1.73532436, 0.58028152, 1.77413709]), 'Van': np.array([ 2.18928571, 1.90979592, 5.07087755]), 'Tram': np.array([ 3.56092896, 2.39601093, 18.34125683]), 'Car': np.array([ 1.52159147, 1.64443089, 3.85813679]), 'Pedestrian': np.array([ 1.75554637, 0.66860882, 0.87623049]), 'Truck': np.array([ 3.07392252, 2.63079903, 11.2190799 ])}
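# dims_avg: per-class average (height, width, length) in meters over the KITTI
# training set, used as anchors for the dimension regression (the network predicts
# offsets relative to these class means)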
#### Placeholder
inputs = tf.placeholder(tf.float32, shape = [None, 224, 224, 3])
d_label = tf.placeholder(tf.float32, shape = [None, 3])
o_label = tf.placeholder(tf.float32, shape = [None, BIN, 2])
c_label = tf.placeholder(tf.float32, shape = [None, BIN])
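# d_label: 3 dimension targets (h, w, l, as prepared in data_processing);
# o_label: per-bin (cos, sin) of the orientation angle; c_label: per-bin confidence target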
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='3D bounding box')
parser.add_argument('--mode',dest = 'mode',help='train or test',default = 'test')
parser.add_argument('--image',dest = 'image',help='Image path')
parser.add_argument('--label',dest = 'label',help='Label path')
parser.add_argument('--box2d',dest = 'box2d',help='2D detection path')
parser.add_argument('--output',dest = 'output',help='Output path', default = './validation/result_2/')
parser.add_argument('--model',dest = 'model')
parser.add_argument('--gpu',dest = 'gpu',default= '0')
args = parser.parse_args()
return args
def build_model():
#### build some layer
def LeakyReLU(x, alpha):
return tf.nn.relu(x) - alpha * tf.nn.relu(-x)
def orientation_loss(y_true, y_pred):
# Find number of anchors
anchors = tf.reduce_sum(tf.square(y_true), axis=2)
anchors = tf.greater(anchors, tf.constant(0.5))
anchors = tf.reduce_sum(tf.cast(anchors, tf.float32), 1)
# Define the loss
loss = (y_true[:,:,0]*y_pred[:,:,0] + y_true[:,:,1]*y_pred[:,:,1])
loss = tf.reduce_sum((2 - 2 * tf.reduce_mean(loss,axis=0))) / anchors
return tf.reduce_mean(loss)
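    # for unit vectors y = (cos t, sin t), the bin-wise product summed above equals
    # cos(theta_true - theta_pred), so each covered bin contributes 2 - 2*cos(delta),
    # which vanishes exactly when the predicted angle matches the target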
#####
#Build Graph
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
weights_regularizer=slim.l2_regularizer(0.0005)):
net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
net = slim.max_pool2d(net, [2, 2], scope='pool4')
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [2, 2], scope='pool5')
conv5 = tf.contrib.layers.flatten(net)
#dimension = slim.fully_connected(conv5, 512, scope='fc7_d')
dimension = slim.fully_connected(conv5, 512, activation_fn=None, scope='fc7_d')
dimension = LeakyReLU(dimension, 0.1)
dimension = slim.dropout(dimension, 0.5, scope='dropout7_d')
#dimension = slim.fully_connected(dimension, 3, scope='fc8_d')
dimension = slim.fully_connected(dimension, 3, activation_fn=None, scope='fc8_d')
#dimension = LeakyReLU(dimension, 0.1)
#loss_d = tf.reduce_mean(tf.square(d_label - dimension))
loss_d = tf.losses.mean_squared_error(d_label, dimension)
#orientation = slim.fully_connected(conv5, 256, scope='fc7_o')
orientation = slim.fully_connected(conv5, 256, activation_fn=None, scope='fc7_o')
orientation = LeakyReLU(orientation, 0.1)
orientation = slim.dropout(orientation, 0.5, scope='dropout7_o')
#orientation = slim.fully_connected(orientation, BIN*2, scope='fc8_o')
orientation = slim.fully_connected(orientation, BIN*2, activation_fn=None, scope='fc8_o')
#orientation = LeakyReLU(orientation, 0.1)
orientation = tf.reshape(orientation, [-1, BIN, 2])
orientation = tf.nn.l2_normalize(orientation, dim=2)
loss_o = orientation_loss(o_label, orientation)
#confidence = slim.fully_connected(conv5, 256, scope='fc7_c')
confidence = slim.fully_connected(conv5, 256, activation_fn=None, scope='fc7_c')
confidence = LeakyReLU(confidence, 0.1)
confidence = slim.dropout(confidence, 0.5, scope='dropout7_c')
confidence = slim.fully_connected(confidence, BIN, activation_fn=None, scope='fc8_c')
loss_c = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=c_label, logits= confidence))
confidence = tf.nn.softmax(confidence)
#loss_c = tf.reduce_mean(tf.square(c_label - confidence))
#loss_c = tf.losses.mean_squared_error(c_label, confidence)
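# Weighted multi-task objective: the dimension and orientation terms are
# up-weighted (4x and 8x) relative to the bin-confidence cross-entropy.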
total_loss = 4. * loss_d + 8. * loss_o + loss_c
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)
return dimension, orientation, confidence, total_loss, optimizer, loss_d, loss_o, loss_c
def train(image_dir, box2d_loc, label_dir):
# load data & gen data
all_objs = parse_annotation(label_dir, image_dir)
all_exams = len(all_objs)
np.random.shuffle(all_objs)
train_gen = data_gen(image_dir, all_objs, BATCH_SIZE)
train_num = int(np.ceil(float(all_exams) / BATCH_SIZE))
### build graph
dimension, orientation, confidence, loss, optimizer, loss_d, loss_o, loss_c = build_model()
### GPU config
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
sess = tf.Session(config=tfconfig)
# create a folder for saving model
if not os.path.isdir(save_path):
os.mkdir(save_path)
variables_to_restore = slim.get_variables()[:26] ## vgg16-conv5
saver = tf.train.Saver(max_to_keep=100)
#Load pretrain VGG model
ckpt_list = tf.contrib.framework.list_variables('./vgg_16.ckpt')[1:-7]
new_ckpt_list = []
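# Note (assumption): tf.contrib.framework.list_variables returns checkpoint
# names in sorted order (biases before weights), while slim.get_variables()
# follows creation order (weights before biases), so adjacent checkpoint
# entries are swapped pairwise below; new_ckpt_list itself is never used.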
for name in range(1,len(ckpt_list),2):
tf.contrib.framework.init_from_checkpoint('./vgg_16.ckpt', {ckpt_list[name-1][0]: variables_to_restore[name]})
tf.contrib.framework.init_from_checkpoint('./vgg_16.ckpt', {ckpt_list[name][0]: variables_to_restore[name-1]})
# Initializing the variables
init = tf.global_variables_initializer()
sess.run(init)
# Start to train model
for epoch in range(epochs):
epoch_loss = np.zeros((train_num,1),dtype = float)
tStart_epoch = time.time()
batch_loss = 0.0
for num_iters in tqdm(range(train_num),ascii=True,desc='Epoch '+str(epoch+1)+' : Loss:'+str(batch_loss)):
train_img, train_label = next(train_gen)
_,batch_loss = sess.run([optimizer,loss], feed_dict={inputs: train_img, d_label: train_label[0], o_label: train_label[1], c_label: train_label[2]})
epoch_loss[num_iters] = batch_loss
# save model
if (epoch+1) % 5 == 0:
saver.save(sess,save_path+"model", global_step = epoch+1)
# Print some information
print "Epoch:", epoch+1, " done. Loss:", np.mean(epoch_loss)
tStop_epoch = time.time()
print "Epoch Time Cost:", round(tStop_epoch - tStart_epoch,2), "s"
sys.stdout.flush()
def test(model, image_dir, box2d_loc, box3d_loc):
### build graph
dimension, orientation, confidence, loss, optimizer, loss_d, loss_o, loss_c = build_model()
### GPU config
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
sess = tf.Session(config=tfconfig)
# Initializing the variables
init = tf.global_variables_initializer()
sess.run(init)
# Restore model
saver = tf.train.Saver()
saver.restore(sess, model)
# create a folder for saving result
if not os.path.isdir(box3d_loc):
os.mkdir(box3d_loc)
# Load image & run testing
all_image = sorted(os.listdir(image_dir))
for f in all_image:
image_file = image_dir + f
box2d_file = box2d_loc + f.replace('png', 'txt')
box3d_file = box3d_loc + f.replace('png', 'txt')
print(image_file)
with open(box3d_file, 'w') as box3d:
img = cv2.imread(image_file)
img = img.astype(np.float32, copy=False)
for line in open(box2d_file):
line = line.strip().split(' ')
truncated = np.abs(float(line[1]))
occluded = np.abs(float(line[2]))
obj = {'xmin':int(float(line[4])),
'ymin':int(float(line[5])),
'xmax':int(float(line[6])),
'ymax':int(float(line[7])),
}
patch = img[obj['ymin']:obj['ymax'],obj['xmin']:obj['xmax']]
patch = cv2.resize(patch, (NORM_H, NORM_W))
patch = patch - np.array([[[103.939, 116.779, 123.68]]])
patch = np.expand_dims(patch, 0)
prediction = sess.run([dimension, orientation, confidence], feed_dict={inputs: patch})
# Transform regressed angle
max_anc = np.argmax(prediction[2][0])
anchors = prediction[1][0][max_anc]
if anchors[1] > 0:
angle_offset = np.arccos(anchors[0])
else:
angle_offset = -np.arccos(anchors[0])
wedge = 2.*np.pi/BIN
angle_offset = angle_offset + max_anc*wedge
angle_offset = angle_offset % (2.*np.pi)
angle_offset = angle_offset - np.pi/2
if angle_offset > np.pi:
angle_offset = angle_offset - (2.*np.pi)
line[3] = str(angle_offset)
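# Convert the local observation angle (alpha) to the global yaw:
# rotation_y = alpha + arctan(x / z), assuming the KITTI label layout
# where line[11] and line[13] hold the object's x and z camera coordinates.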
line[-1] = angle_offset + np.arctan(float(line[11]) / float(line[13]))
# Transform regressed dimension
if line[0] in VEHICLES:
dims = dims_avg[line[0]] + prediction[0][0]
else:
dims = dims_avg['Car'] + prediction[0][0]
line = line[:8] + list(dims) + line[11:]
# Write regressed 3D dim and oritent to file
line = ' '.join([str(item) for item in line]) +' '+ str(np.max(prediction[2][0]))+ '\n'
box3d.write(line)
if __name__ == "__main__":
args = parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.image is None:
raise IOError('Image path not specified.')
if args.box2d is None :
raise IOError('2D bounding box path not specified.')
if args.mode == 'train':
if args.label is None:
raise IOError('Label path not specified.')
train(args.image, args.box2d, args.label)
else:
if args.model is None:
raise IOError('Model path not specified.')
test(args.model, args.image, args.box2d, args.output)
| 11,431 | 38.557093 | 379 | py |
3D-Deepbox | 3D-Deepbox-master/data_processing.py | import tensorflow as tf
import cv2, os
import numpy as np
from random import shuffle
import copy
#####
#Training setting
BIN, OVERLAP = 2, 0.1
NORM_H, NORM_W = 224, 224
VEHICLES = ['Car', 'Truck', 'Van', 'Tram','Pedestrian','Cyclist']
def compute_anchors(angle):
anchors = []
wedge = 2.*np.pi/BIN
l_index = int(angle/wedge)
r_index = l_index + 1
if (angle - l_index*wedge) < wedge/2 * (1+OVERLAP/2):
anchors.append([l_index, angle - l_index*wedge])
if (r_index*wedge - angle) < wedge/2 * (1+OVERLAP/2):
anchors.append([r_index%BIN, angle - r_index*wedge])
return anchors
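# Worked example (editor's sketch): with BIN=2 and OVERLAP=0.1, wedge = pi, so
#   compute_anchors(0.3) -> [[0, 0.3]]                  (left bin only)
#   compute_anchors(1.6) -> [[0, 1.6], [1, 1.6 - pi]]   (overlap: both bins)
#   compute_anchors(3.0) -> [[1, 3.0 - pi]]             (right bin only)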
def parse_annotation(label_dir, image_dir):
all_objs = []
dims_avg = {key:np.array([0, 0, 0]) for key in VEHICLES}
dims_cnt = {key:0 for key in VEHICLES}
for label_file in sorted(os.listdir(label_dir)):
image_file = label_file.replace('txt', 'png')
for line in open(label_dir + label_file).readlines():
line = line.strip().split(' ')
truncated = np.abs(float(line[1]))
occluded = np.abs(float(line[2]))
if line[0] in VEHICLES and truncated < 0.1 and occluded < 0.1:
new_alpha = float(line[3]) + np.pi/2.
if new_alpha < 0:
new_alpha = new_alpha + 2.*np.pi
new_alpha = new_alpha - int(new_alpha/(2.*np.pi))*(2.*np.pi)
obj = {'name':line[0],
'image':image_file,
'xmin':int(float(line[4])),
'ymin':int(float(line[5])),
'xmax':int(float(line[6])),
'ymax':int(float(line[7])),
'dims':np.array([float(number) for number in line[8:11]]),
'new_alpha': new_alpha
}
dims_avg[obj['name']] = dims_cnt[obj['name']]*dims_avg[obj['name']] + obj['dims']
dims_cnt[obj['name']] += 1
dims_avg[obj['name']] /= dims_cnt[obj['name']]
all_objs.append(obj)
###### flip data
for obj in all_objs:
# Fix dimensions
obj['dims'] = obj['dims'] - dims_avg[obj['name']]
# Fix orientation and confidence for no flip
orientation = np.zeros((BIN,2))
confidence = np.zeros(BIN)
anchors = compute_anchors(obj['new_alpha'])
for anchor in anchors:
orientation[anchor[0]] = np.array([np.cos(anchor[1]), np.sin(anchor[1])])
confidence[anchor[0]] = 1.
confidence = confidence / np.sum(confidence)
obj['orient'] = orientation
obj['conf'] = confidence
# Fix orientation and confidence for flip
orientation = np.zeros((BIN,2))
confidence = np.zeros(BIN)
anchors = compute_anchors(2.*np.pi - obj['new_alpha'])
for anchor in anchors:
orientation[anchor[0]] = np.array([np.cos(anchor[1]), np.sin(anchor[1])])
confidence[anchor[0]] = 1
confidence = confidence / np.sum(confidence)
obj['orient_flipped'] = orientation
obj['conf_flipped'] = confidence
return all_objs
def prepare_input_and_output(image_dir, train_inst):
### Prepare image patch
xmin = train_inst['xmin'] #+ np.random.randint(-MAX_JIT, MAX_JIT+1)
ymin = train_inst['ymin'] #+ np.random.randint(-MAX_JIT, MAX_JIT+1)
xmax = train_inst['xmax'] #+ np.random.randint(-MAX_JIT, MAX_JIT+1)
ymax = train_inst['ymax'] #+ np.random.randint(-MAX_JIT, MAX_JIT+1)
img = cv2.imread(image_dir + train_inst['image'])
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = copy.deepcopy(img[ymin:ymax+1,xmin:xmax+1]).astype(np.float32)
# re-color the image
#img += np.random.randint(-2, 3, img.shape).astype('float32')
#t = [np.random.uniform()]
#t += [np.random.uniform()]
#t += [np.random.uniform()]
#t = np.array(t)
#img = img * (1 + t)
#img = img / (255. * 2.)
# flip the image
flip = np.random.binomial(1, .5)
if flip > 0.5: img = cv2.flip(img, 1)
# resize the image to standard size
img = cv2.resize(img, (NORM_H, NORM_W))
img = img - np.array([[[103.939, 116.779, 123.68]]])
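# Note: [103.939, 116.779, 123.68] are the ImageNet channel means in BGR
# order, but the patch was converted to RGB above; the test path in main.py
# subtracts the same constant from a BGR image, so train/test preprocessing
# differ in the original repo.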
#img = img[:,:,::-1]
### Fix orientation and confidence
if flip > 0.5:
return img, train_inst['dims'], train_inst['orient_flipped'], train_inst['conf_flipped']
else:
return img, train_inst['dims'], train_inst['orient'], train_inst['conf']
def data_gen(image_dir, all_objs, batch_size):
num_obj = len(all_objs)
keys = list(range(num_obj))
np.random.shuffle(keys)
l_bound = 0
r_bound = batch_size if batch_size < num_obj else num_obj
while True:
if l_bound == r_bound:
l_bound = 0
r_bound = batch_size if batch_size < num_obj else num_obj
np.random.shuffle(keys)
currt_inst = 0
x_batch = np.zeros((r_bound - l_bound, 224, 224, 3))
d_batch = np.zeros((r_bound - l_bound, 3))
o_batch = np.zeros((r_bound - l_bound, BIN, 2))
c_batch = np.zeros((r_bound - l_bound, BIN))
for key in keys[l_bound:r_bound]:
# augment input image and fix object's orientation and confidence
image, dimension, orientation, confidence = prepare_input_and_output(image_dir, all_objs[key])
#plt.figure(figsize=(5,5))
#plt.imshow(image/255./2.); plt.show()
#print dimension
#print orientation
#print confidence
x_batch[currt_inst, :] = image
d_batch[currt_inst, :] = dimension
o_batch[currt_inst, :] = orientation
c_batch[currt_inst, :] = confidence
currt_inst += 1
yield x_batch, [d_batch, o_batch, c_batch]
l_bound = r_bound
r_bound = r_bound + batch_size
if r_bound > num_obj: r_bound = num_obj
| 6,100 | 33.083799 | 106 | py |
Beholder-GAN | Beholder-GAN-master/tfutil.py | #Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
#
#Attribution-NonCommercial 4.0 International
#
#=======================================================================
#
#Creative Commons Corporation ("Creative Commons") is not a law firm and
#does not provide legal services or legal advice. Distribution of
#Creative Commons public licenses does not create a lawyer-client or
#other relationship. Creative Commons makes its licenses and related
#information available on an "as-is" basis. Creative Commons gives no
#warranties regarding its licenses, any material licensed under their
#terms and conditions, or any related information. Creative Commons
#disclaims all liability for damages resulting from their use to the
#fullest extent possible.
#
#Using Creative Commons Public Licenses
#
#Creative Commons public licenses provide a standard set of terms and
#conditions that creators and other rights holders may use to share
#original works of authorship and other material subject to copyright
#and certain other rights specified in the public license below. The
#following considerations are for informational purposes only, are not
#exhaustive, and do not form part of our licenses.
#
# Considerations for licensors: Our public licenses are
# intended for use by those authorized to give the public
# permission to use material in ways otherwise restricted by
# copyright and certain other rights. Our licenses are
# irrevocable. Licensors should read and understand the terms
# and conditions of the license they choose before applying it.
# Licensors should also secure all rights necessary before
# applying our licenses so that the public can reuse the
# material as expected. Licensors should clearly mark any
# material not subject to the license. This includes other CC-
# licensed material, or material used under an exception or
# limitation to copyright. More considerations for licensors:
# wiki.creativecommons.org/Considerations_for_licensors
#
# Considerations for the public: By using one of our public
# licenses, a licensor grants the public permission to use the
# licensed material under specified terms and conditions. If
# the licensor's permission is not necessary for any reason--for
# example, because of any applicable exception or limitation to
# copyright--then that use is not regulated by the license. Our
# licenses grant only permissions under copyright and certain
# other rights that a licensor has authority to grant. Use of
# the licensed material may still be restricted for other
# reasons, including because others have copyright or other
# rights in the material. A licensor may make special requests,
# such as asking that all changes be marked or described.
# Although not required by our licenses, you are encouraged to
# respect those requests where reasonable. More_considerations
# for the public:
# wiki.creativecommons.org/Considerations_for_licensees
#
#=======================================================================
#
#Creative Commons Attribution-NonCommercial 4.0 International Public
#License
#
#By exercising the Licensed Rights (defined below), You accept and agree
#to be bound by the terms and conditions of this Creative Commons
#Attribution-NonCommercial 4.0 International Public License ("Public
#License"). To the extent this Public License may be interpreted as a
#contract, You are granted the Licensed Rights in consideration of Your
#acceptance of these terms and conditions, and the Licensor grants You
#such rights in consideration of benefits the Licensor receives from
#making the Licensed Material available under these terms and
#conditions.
#
#
#Section 1 -- Definitions.
#
# a. Adapted Material means material subject to Copyright and Similar
# Rights that is derived from or based upon the Licensed Material
# and in which the Licensed Material is translated, altered,
# arranged, transformed, or otherwise modified in a manner requiring
# permission under the Copyright and Similar Rights held by the
# Licensor. For purposes of this Public License, where the Licensed
# Material is a musical work, performance, or sound recording,
# Adapted Material is always produced where the Licensed Material is
# synched in timed relation with a moving image.
#
# b. Adapter's License means the license You apply to Your Copyright
# and Similar Rights in Your contributions to Adapted Material in
# accordance with the terms and conditions of this Public License.
#
# c. Copyright and Similar Rights means copyright and/or similar rights
# closely related to copyright including, without limitation,
# performance, broadcast, sound recording, and Sui Generis Database
# Rights, without regard to how the rights are labeled or
# categorized. For purposes of this Public License, the rights
# specified in Section 2(b)(1)-(2) are not Copyright and Similar
# Rights.
# d. Effective Technological Measures means those measures that, in the
# absence of proper authority, may not be circumvented under laws
# fulfilling obligations under Article 11 of the WIPO Copyright
# Treaty adopted on December 20, 1996, and/or similar international
# agreements.
#
# e. Exceptions and Limitations means fair use, fair dealing, and/or
# any other exception or limitation to Copyright and Similar Rights
# that applies to Your use of the Licensed Material.
#
# f. Licensed Material means the artistic or literary work, database,
# or other material to which the Licensor applied this Public
# License.
#
# g. Licensed Rights means the rights granted to You subject to the
# terms and conditions of this Public License, which are limited to
# all Copyright and Similar Rights that apply to Your use of the
# Licensed Material and that the Licensor has authority to license.
#
# h. Licensor means the individual(s) or entity(ies) granting rights
# under this Public License.
#
# i. NonCommercial means not primarily intended for or directed towards
# commercial advantage or monetary compensation. For purposes of
# this Public License, the exchange of the Licensed Material for
# other material subject to Copyright and Similar Rights by digital
# file-sharing or similar means is NonCommercial provided there is
# no payment of monetary compensation in connection with the
# exchange.
#
# j. Share means to provide material to the public by any means or
# process that requires permission under the Licensed Rights, such
# as reproduction, public display, public performance, distribution,
# dissemination, communication, or importation, and to make material
# available to the public including in ways that members of the
# public may access the material from a place and at a time
# individually chosen by them.
#
# k. Sui Generis Database Rights means rights other than copyright
# resulting from Directive 96/9/EC of the European Parliament and of
# the Council of 11 March 1996 on the legal protection of databases,
# as amended and/or succeeded, as well as other essentially
# equivalent rights anywhere in the world.
#
# l. You means the individual or entity exercising the Licensed Rights
# under this Public License. Your has a corresponding meaning.
#
#
#Section 2 -- Scope.
#
# a. License grant.
#
# 1. Subject to the terms and conditions of this Public License,
# the Licensor hereby grants You a worldwide, royalty-free,
# non-sublicensable, non-exclusive, irrevocable license to
# exercise the Licensed Rights in the Licensed Material to:
#
# a. reproduce and Share the Licensed Material, in whole or
# in part, for NonCommercial purposes only; and
#
# b. produce, reproduce, and Share Adapted Material for
# NonCommercial purposes only.
#
# 2. Exceptions and Limitations. For the avoidance of doubt, where
# Exceptions and Limitations apply to Your use, this Public
# License does not apply, and You do not need to comply with
# its terms and conditions.
#
# 3. Term. The term of this Public License is specified in Section
# 6(a).
#
# 4. Media and formats; technical modifications allowed. The
# Licensor authorizes You to exercise the Licensed Rights in
# all media and formats whether now known or hereafter created,
# and to make technical modifications necessary to do so. The
# Licensor waives and/or agrees not to assert any right or
# authority to forbid You from making technical modifications
# necessary to exercise the Licensed Rights, including
# technical modifications necessary to circumvent Effective
# Technological Measures. For purposes of this Public License,
# simply making modifications authorized by this Section 2(a)
# (4) never produces Adapted Material.
#
# 5. Downstream recipients.
#
# a. Offer from the Licensor -- Licensed Material. Every
# recipient of the Licensed Material automatically
# receives an offer from the Licensor to exercise the
# Licensed Rights under the terms and conditions of this
# Public License.
#
# b. No downstream restrictions. You may not offer or impose
# any additional or different terms or conditions on, or
# apply any Effective Technological Measures to, the
# Licensed Material if doing so restricts exercise of the
# Licensed Rights by any recipient of the Licensed
# Material.
#
# 6. No endorsement. Nothing in this Public License constitutes or
# may be construed as permission to assert or imply that You
# are, or that Your use of the Licensed Material is, connected
# with, or sponsored, endorsed, or granted official status by,
# the Licensor or others designated to receive attribution as
# provided in Section 3(a)(1)(A)(i).
#
# b. Other rights.
#
# 1. Moral rights, such as the right of integrity, are not
# licensed under this Public License, nor are publicity,
# privacy, and/or other similar personality rights; however, to
# the extent possible, the Licensor waives and/or agrees not to
# assert any such rights held by the Licensor to the limited
# extent necessary to allow You to exercise the Licensed
# Rights, but not otherwise.
#
# 2. Patent and trademark rights are not licensed under this
# Public License.
#
# 3. To the extent possible, the Licensor waives any right to
# collect royalties from You for the exercise of the Licensed
# Rights, whether directly or through a collecting society
# under any voluntary or waivable statutory or compulsory
# licensing scheme. In all other cases the Licensor expressly
# reserves any right to collect such royalties, including when
# the Licensed Material is used other than for NonCommercial
# purposes.
#
#
#Section 3 -- License Conditions.
#
#Your exercise of the Licensed Rights is expressly made subject to the
#following conditions.
#
# a. Attribution.
#
# 1. If You Share the Licensed Material (including in modified
# form), You must:
#
# a. retain the following if it is supplied by the Licensor
# with the Licensed Material:
#
# i. identification of the creator(s) of the Licensed
# Material and any others designated to receive
# attribution, in any reasonable manner requested by
# the Licensor (including by pseudonym if
# designated);
#
# ii. a copyright notice;
#
# iii. a notice that refers to this Public License;
#
# iv. a notice that refers to the disclaimer of
# warranties;
#
# v. a URI or hyperlink to the Licensed Material to the
# extent reasonably practicable;
#
# b. indicate if You modified the Licensed Material and
# retain an indication of any previous modifications; and
#
# c. indicate the Licensed Material is licensed under this
# Public License, and include the text of, or the URI or
# hyperlink to, this Public License.
#
# 2. You may satisfy the conditions in Section 3(a)(1) in any
# reasonable manner based on the medium, means, and context in
# which You Share the Licensed Material. For example, it may be
# reasonable to satisfy the conditions by providing a URI or
# hyperlink to a resource that includes the required
# information.
#
# 3. If requested by the Licensor, You must remove any of the
# information required by Section 3(a)(1)(A) to the extent
# reasonably practicable.
#
# 4. If You Share Adapted Material You produce, the Adapter's
# License You apply must not prevent recipients of the Adapted
# Material from complying with this Public License.
#
#
#Section 4 -- Sui Generis Database Rights.
#
#Where the Licensed Rights include Sui Generis Database Rights that
#apply to Your use of the Licensed Material:
#
# a. for the avoidance of doubt, Section 2(a)(1) grants You the right
# to extract, reuse, reproduce, and Share all or a substantial
# portion of the contents of the database for NonCommercial purposes
# only;
#
# b. if You include all or a substantial portion of the database
# contents in a database in which You have Sui Generis Database
# Rights, then the database in which You have Sui Generis Database
# Rights (but not its individual contents) is Adapted Material; and
#
# c. You must comply with the conditions in Section 3(a) if You Share
# all or a substantial portion of the contents of the database.
#
#For the avoidance of doubt, this Section 4 supplements and does not
#replace Your obligations under this Public License where the Licensed
#Rights include other Copyright and Similar Rights.
#
#
#Section 5 -- Disclaimer of Warranties and Limitation of Liability.
#
# a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
# EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
# AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
# ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
# IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
# WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
# ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
# KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
# ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
#
# b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
# TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
# NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
# INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
# COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
# USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
# ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
# DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
# IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
#
# c. The disclaimer of warranties and limitation of liability provided
# above shall be interpreted in a manner that, to the extent
# possible, most closely approximates an absolute disclaimer and
# waiver of all liability.
#
#
#Section 6 -- Term and Termination.
#
# a. This Public License applies for the term of the Copyright and
# Similar Rights licensed here. However, if You fail to comply with
# this Public License, then Your rights under this Public License
# terminate automatically.
#
# b. Where Your right to use the Licensed Material has terminated under
# Section 6(a), it reinstates:
#
# 1. automatically as of the date the violation is cured, provided
# it is cured within 30 days of Your discovery of the
# violation; or
#
# 2. upon express reinstatement by the Licensor.
#
# For the avoidance of doubt, this Section 6(b) does not affect any
# right the Licensor may have to seek remedies for Your violations
# of this Public License.
#
# c. For the avoidance of doubt, the Licensor may also offer the
# Licensed Material under separate terms or conditions or stop
# distributing the Licensed Material at any time; however, doing so
# will not terminate this Public License.
#
# d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
# License.
#
#
#Section 7 -- Other Terms and Conditions.
#
# a. The Licensor shall not be bound by any additional or different
# terms or conditions communicated by You unless expressly agreed.
#
# b. Any arrangements, understandings, or agreements regarding the
# Licensed Material not stated herein are separate from and
# independent of the terms and conditions of this Public License.
#
#
#Section 8 -- Interpretation.
#
# a. For the avoidance of doubt, this Public License does not, and
# shall not be interpreted to, reduce, limit, restrict, or impose
# conditions on any use of the Licensed Material that could lawfully
# be made without permission under this Public License.
#
# b. To the extent possible, if any provision of this Public License is
# deemed unenforceable, it shall be automatically reformed to the
# minimum extent necessary to make it enforceable. If the provision
# cannot be reformed, it shall be severed from this Public License
# without affecting the enforceability of the remaining terms and
# conditions.
#
# c. No term or condition of this Public License will be waived and no
# failure to comply consented to unless expressly agreed to by the
# Licensor.
#
# d. Nothing in this Public License constitutes or may be interpreted
# as a limitation upon, or waiver of, any privileges and immunities
# that apply to the Licensor or You, including from the legal
# processes of any jurisdiction or authority.
#
#=======================================================================
#
#Creative Commons is not a party to its public
#licenses. Notwithstanding, Creative Commons may elect to apply one of
#its public licenses to material it publishes and in those instances
#will be considered the "Licensor." The text of the Creative Commons
#public licenses is dedicated to the public domain under the CC0 Public
#Domain Dedication. Except for the limited purpose of indicating that
#material is shared under a Creative Commons public license or as
#otherwise permitted by the Creative Commons policies published at
#creativecommons.org/policies, Creative Commons does not authorize the
#use of the trademark "Creative Commons" or any other trademark or logo
#of Creative Commons without its prior written consent including,
#without limitation, in connection with any unauthorized modifications
#to any of its public licenses or any other arrangements,
#understandings, or agreements concerning use of licensed material. For
#the avoidance of doubt, this paragraph does not form part of the
#public licenses.
#
#Creative Commons may be contacted at creativecommons.org.
import os
import sys
import inspect
import importlib
import imp
import numpy as np
from collections import OrderedDict
import tensorflow as tf
from tensorflow.python import debug as tf_debug
import pdb
import os
import scipy
# ----------------------------------------------------------------------------
# Convenience.
def run(*args, **kwargs): # Run the specified ops in the default session.
# GUI tensorflow debugger using tensorboard
# session = tf.Session()
# with tf_debug.TensorBoardDebugWrapperSession(session, 'localhost:6064') as sess:
# sess.run(*args, **kwargs)
# return
# CLI tensorflow debugger
# session = tf.Session()
# with tf_debug.LocalCLIDebugWrapperSession(session) as sess:
# sess.run(*args, **kwargs)
# return
# with tf.Session() as sess:
# import pdb
# pdb.set_trace()
return tf.get_default_session().run(*args, **kwargs)
def is_tf_expression(x):
return isinstance(x, tf.Tensor) or isinstance(x, tf.Variable) or isinstance(x, tf.Operation)
def shape_to_list(shape):
return [dim.value for dim in shape]
def flatten(x):
with tf.name_scope('Flatten'):
return tf.reshape(x, [-1])
def log2(x):
with tf.name_scope('Log2'):
return tf.log(x) * np.float32(1.0 / np.log(2.0))
def exp2(x):
with tf.name_scope('Exp2'):
return tf.exp(x * np.float32(np.log(2.0)))
def lerp(a, b, t):
with tf.name_scope('Lerp'):
return a + (b - a) * t
def lerp_clip(a, b, t):
with tf.name_scope('LerpClip'):
return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0)
def absolute_name_scope(scope): # Forcefully enter the specified name scope, ignoring any surrounding scopes.
return tf.name_scope(scope + '/')
# ----------------------------------------------------------------------------
# Initialize TensorFlow graph and session using good default settings.
def init_tf(config_dict=dict()):
if tf.get_default_session() is None:
tf.set_random_seed(np.random.randint(1 << 31))
create_session(config_dict, force_as_default=True)
# ----------------------------------------------------------------------------
# Create tf.Session based on config dict of the form
# {'gpu_options.allow_growth': True}
def create_session(config_dict=dict(), force_as_default=False):
config = tf.ConfigProto()
for key, value in config_dict.items():
fields = key.split('.')
obj = config
for field in fields[:-1]:
obj = getattr(obj, field)
setattr(obj, fields[-1], value)
session = tf.Session(config=config)
if force_as_default:
session._default_session = session.as_default()
session._default_session.enforce_nesting = False
session._default_session.__enter__()
return session
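# Example (editor's sketch): enable GPU memory growth for the default session:
#   init_tf({'gpu_options.allow_growth': True})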
# ----------------------------------------------------------------------------
# Initialize all tf.Variables that have not already been initialized.
# Equivalent to the following, but more efficient and does not bloat the tf graph:
# tf.variables_initializer(tf.report_uninitialized_variables()).run()
def init_uninited_vars(vars=None):
if vars is None: vars = tf.global_variables()
test_vars = []
test_ops = []
with tf.control_dependencies(None): # ignore surrounding control_dependencies
for var in vars:
assert is_tf_expression(var)
try:
tf.get_default_graph().get_tensor_by_name(var.name.replace(':0', '/IsVariableInitialized:0'))
except KeyError:
# Op does not exist => variable may be uninitialized.
test_vars.append(var)
with absolute_name_scope(var.name.split(':')[0]):
test_ops.append(tf.is_variable_initialized(var))
init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited]
run([var.initializer for var in init_vars])
# ----------------------------------------------------------------------------
# Set the values of given tf.Variables.
# Equivalent to the following, but more efficient and does not bloat the tf graph:
# tfutil.run([tf.assign(var, value) for var, value in var_to_value_dict.items()])
def set_vars(var_to_value_dict):
ops = []
feed_dict = {}
for var, value in var_to_value_dict.items():
assert is_tf_expression(var)
try:
setter = tf.get_default_graph().get_tensor_by_name(
var.name.replace(':0', '/setter:0')) # look for existing op
except KeyError:
with absolute_name_scope(var.name.split(':')[0]):
with tf.control_dependencies(None): # ignore surrounding control_dependencies
setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, 'new_value'),
name='setter') # create new setter
ops.append(setter)
feed_dict[setter.op.inputs[1]] = value
run(ops, feed_dict)
# ----------------------------------------------------------------------------
# Autosummary creates an identity op that internally keeps track of the input
# values and automatically shows up in TensorBoard. The reported value
# represents an average over input components. The average is accumulated
# constantly over time and flushed when save_summaries() is called.
#
# Notes:
# - The output tensor must be used as an input for something else in the
# graph. Otherwise, the autosummary op will not get executed, and the average
# value will not get accumulated.
# - It is perfectly fine to include autosummaries with the same name in
# several places throughout the graph, even if they are executed concurrently.
# - It is ok to also pass in a python scalar or numpy array. In this case, it
# is added to the average immediately.
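# Minimal usage sketch (editor's addition; names are illustrative):
#   loss = autosummary('Loss/total', loss)   # identity op + running average
#   ... use `loss` downstream so the update op actually executes ...
#   writer = tf.summary.FileWriter(log_dir, tf.get_default_graph())
#   save_summaries(writer, global_step=cur_step)  # flush averages to TensorBoard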
_autosummary_vars = OrderedDict() # name => [var, ...]
_autosummary_immediate = OrderedDict() # name => update_op, update_value
_autosummary_finalized = False
def autosummary(name, value):
id = name.replace('/', '_')
if is_tf_expression(value):
with tf.name_scope('summary_' + id), tf.device(value.device):
update_op = _create_autosummary_var(name, value)
with tf.control_dependencies([update_op]):
return tf.identity(value)
else: # python scalar or numpy array
if name not in _autosummary_immediate:
with absolute_name_scope('Autosummary/' + id), tf.device(None), tf.control_dependencies(None):
update_value = tf.placeholder(tf.float32)
update_op = _create_autosummary_var(name, update_value)
_autosummary_immediate[name] = update_op, update_value
update_op, update_value = _autosummary_immediate[name]
run(update_op, {update_value: np.float32(value)})
return value
# Create the necessary ops to include autosummaries in TensorBoard report.
# Note: This should be done only once per graph.
def finalize_autosummaries():
global _autosummary_finalized
if _autosummary_finalized:
return
_autosummary_finalized = True
init_uninited_vars([var for vars in _autosummary_vars.values() for var in vars])
with tf.device(None), tf.control_dependencies(None):
for name, vars in _autosummary_vars.items():
id = name.replace('/', '_')
with absolute_name_scope('Autosummary/' + id):
sum = tf.add_n(vars)
avg = sum[0] / sum[1]
with tf.control_dependencies([avg]): # read before resetting
reset_ops = [tf.assign(var, tf.zeros(2)) for var in vars]
with tf.name_scope(None), tf.control_dependencies(reset_ops): # reset before reporting
tf.summary.scalar(name, avg)
# Internal helper for creating autosummary accumulators.
def _create_autosummary_var(name, value_expr):
assert not _autosummary_finalized
v = tf.cast(value_expr, tf.float32)
if v.shape.ndims == 0:
v = [v, np.float32(1.0)]
elif v.shape.ndims == 1:
v = [tf.reduce_sum(v), tf.cast(tf.shape(v)[0], tf.float32)]
else:
v = [tf.reduce_sum(v), tf.reduce_prod(tf.cast(tf.shape(v), tf.float32))]
v = tf.cond(tf.is_finite(v[0]), lambda: tf.stack(v), lambda: tf.zeros(2))
with tf.control_dependencies(None):
var = tf.Variable(tf.zeros(2)) # [numerator, denominator]
update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))
if name in _autosummary_vars:
_autosummary_vars[name].append(var)
else:
_autosummary_vars[name] = [var]
return update_op
# ----------------------------------------------------------------------------
# Call filewriter.add_summary() with all summaries in the default graph,
# automatically finalizing and merging them on the first call.
_summary_merge_op = None
def save_summaries(filewriter, global_step=None):
global _summary_merge_op
if _summary_merge_op is None:
finalize_autosummaries()
with tf.device(None), tf.control_dependencies(None):
_summary_merge_op = tf.summary.merge_all()
filewriter.add_summary(_summary_merge_op.eval(), global_step)
# ----------------------------------------------------------------------------
# Utilities for importing modules and objects by name.
def import_module(module_or_obj_name):
parts = module_or_obj_name.split('.')
parts[0] = {'np': 'numpy', 'tf': 'tensorflow'}.get(parts[0], parts[0])
for i in range(len(parts), 0, -1):
try:
module = importlib.import_module('.'.join(parts[:i]))
relative_obj_name = '.'.join(parts[i:])
return module, relative_obj_name
except ImportError:
pass
raise ImportError(module_or_obj_name)
def find_obj_in_module(module, relative_obj_name):
obj = module
for part in relative_obj_name.split('.'):
obj = getattr(obj, part)
return obj
def import_obj(obj_name):
module, relative_obj_name = import_module(obj_name)
return find_obj_in_module(module, relative_obj_name)
def call_func_by_name(*args, func=None, **kwargs):
assert func is not None
return import_obj(func)(*args, **kwargs)
# ----------------------------------------------------------------------------
# Wrapper for tf.train.Optimizer that automatically takes care of:
# - Gradient averaging for multi-GPU training.
# - Dynamic loss scaling and typecasts for FP16 training.
# - Ignoring corrupted gradients that contain NaNs/Infs.
# - Reporting statistics.
# - Well-chosen default settings.
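# Usage sketch (editor's addition; `net.trainables` comes from Network below):
#   opt = Optimizer(name='TrainG', learning_rate=0.001)
#   for gpu in range(num_gpus):
#       with tf.device('/gpu:%d' % gpu):
#           loss = ...  # per-GPU loss for this tower
#           opt.register_gradients(loss, net.trainables)
#   train_op = opt.apply_updates()  # sums grads across GPUs, applies once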
class Optimizer:
def __init__(
self,
name='Train',
tf_optimizer='tf.train.AdamOptimizer',
learning_rate=0.001,
use_loss_scaling=False,
loss_scaling_init=64.0,
loss_scaling_inc=0.0005,
loss_scaling_dec=1.0,
**kwargs):
# Init fields.
self.name = name
self.learning_rate = tf.convert_to_tensor(learning_rate)
self.id = self.name.replace('/', '.')
self.scope = tf.get_default_graph().unique_name(self.id)
self.optimizer_class = import_obj(tf_optimizer)
self.optimizer_kwargs = dict(kwargs)
self.use_loss_scaling = use_loss_scaling
self.loss_scaling_init = loss_scaling_init
self.loss_scaling_inc = loss_scaling_inc
self.loss_scaling_dec = loss_scaling_dec
self._grad_shapes = None # [shape, ...]
self._dev_opt = OrderedDict() # device => optimizer
self._dev_grads = OrderedDict() # device => [[(grad, var), ...], ...]
self._dev_ls_var = OrderedDict() # device => variable (log2 of loss scaling factor)
self._updates_applied = False
# Register the gradients of the given loss function with respect to the given variables.
# Intended to be called once per GPU.
def register_gradients(self, loss, vars):
assert not self._updates_applied
# Validate arguments.
if isinstance(vars, dict):
vars = list(vars.values()) # allow passing in Network.trainables as vars
assert isinstance(vars, list) and len(vars) >= 1
assert all(is_tf_expression(expr) for expr in vars + [loss])
if self._grad_shapes is None:
self._grad_shapes = [shape_to_list(var.shape) for var in vars]
assert len(vars) == len(self._grad_shapes)
assert all(shape_to_list(var.shape) == var_shape for var, var_shape in zip(vars, self._grad_shapes))
dev = loss.device
assert all(var.device == dev for var in vars)
# Register device and compute gradients.
with tf.name_scope(self.id + '_grad'), tf.device(dev):
if dev not in self._dev_opt:
opt_name = self.scope.replace('/', '_') + '_opt%d' % len(self._dev_opt)
self._dev_opt[dev] = self.optimizer_class(name=opt_name, learning_rate=self.learning_rate,
**self.optimizer_kwargs)
self._dev_grads[dev] = []
loss = self.apply_loss_scaling(tf.cast(loss, tf.float32))
grads = self._dev_opt[dev].compute_gradients(loss, vars,
gate_gradients=tf.train.Optimizer.GATE_NONE) # disable gating to reduce memory usage
grads = [(g, v) if g is not None else (tf.zeros_like(v), v) for g, v in
grads] # replace disconnected gradients with zeros
self._dev_grads[dev].append(grads)
# Construct training op to update the registered variables based on their gradients.
def apply_updates(self):
assert not self._updates_applied
self._updates_applied = True
devices = list(self._dev_grads.keys())
total_grads = sum(len(grads) for grads in self._dev_grads.values())
assert len(devices) >= 1 and total_grads >= 1
ops = []
with absolute_name_scope(self.scope):
# Cast gradients to FP32 and calculate partial sum within each device.
dev_grads = OrderedDict() # device => [(grad, var), ...]
for dev_idx, dev in enumerate(devices):
with tf.name_scope('ProcessGrads%d' % dev_idx), tf.device(dev):
sums = []
for gv in zip(*self._dev_grads[dev]):
assert all(v is gv[0][1] for g, v in gv)
g = [tf.cast(g, tf.float32) for g, v in gv]
g = g[0] if len(g) == 1 else tf.add_n(g)
sums.append((g, gv[0][1]))
dev_grads[dev] = sums
# Sum gradients across devices.
if len(devices) > 1:
with tf.name_scope('SumAcrossGPUs'), tf.device(None):
for var_idx, grad_shape in enumerate(self._grad_shapes):
g = [dev_grads[dev][var_idx][0] for dev in devices]
if np.prod(grad_shape): # nccl does not support zero-sized tensors
g = tf.contrib.nccl.all_sum(g)
for dev, gg in zip(devices, g):
dev_grads[dev][var_idx] = (gg, dev_grads[dev][var_idx][1])
# Apply updates separately on each device.
for dev_idx, (dev, grads) in enumerate(dev_grads.items()):
with tf.name_scope('ApplyGrads%d' % dev_idx), tf.device(dev):
# Scale gradients as needed.
if self.use_loss_scaling or total_grads > 1:
with tf.name_scope('Scale'):
coef = tf.constant(np.float32(1.0 / total_grads), name='coef')
coef = self.undo_loss_scaling(coef)
grads = [(g * coef, v) for g, v in grads]
# Check for overflows.
with tf.name_scope('CheckOverflow'):
grad_ok = tf.reduce_all(tf.stack([tf.reduce_all(tf.is_finite(g)) for g, v in grads]))
# Update weights and adjust loss scaling.
with tf.name_scope('UpdateWeights'):
opt = self._dev_opt[dev]
ls_var = self.get_loss_scaling_var(dev)
if not self.use_loss_scaling:
ops.append(tf.cond(grad_ok, lambda: opt.apply_gradients(grads), tf.no_op))
else:
ops.append(tf.cond(grad_ok,
lambda: tf.group(tf.assign_add(ls_var, self.loss_scaling_inc),
opt.apply_gradients(grads)),
lambda: tf.group(tf.assign_sub(ls_var, self.loss_scaling_dec))))
# Report statistics on the last device.
if dev == devices[-1]:
with tf.name_scope('Statistics'):
ops.append(autosummary(self.id + '/learning_rate', self.learning_rate))
ops.append(autosummary(self.id + '/overflow_frequency', tf.where(grad_ok, 0, 1)))
if self.use_loss_scaling:
ops.append(autosummary(self.id + '/loss_scaling_log2', ls_var))
# Initialize variables and group everything into a single op.
self.reset_optimizer_state()
init_uninited_vars(list(self._dev_ls_var.values()))
return tf.group(*ops, name='TrainingOp')
# Reset internal state of the underlying optimizer.
def reset_optimizer_state(self):
run([var.initializer for opt in self._dev_opt.values() for var in opt.variables()])
# Get or create variable representing log2 of the current dynamic loss scaling factor.
def get_loss_scaling_var(self, device):
if not self.use_loss_scaling:
return None
if device not in self._dev_ls_var:
with absolute_name_scope(self.scope + '/LossScalingVars'), tf.control_dependencies(None):
self._dev_ls_var[device] = tf.Variable(np.float32(self.loss_scaling_init), name='loss_scaling_var')
return self._dev_ls_var[device]
# Apply dynamic loss scaling for the given expression.
def apply_loss_scaling(self, value):
assert is_tf_expression(value)
if not self.use_loss_scaling:
return value
return value * exp2(self.get_loss_scaling_var(value.device))
# Undo the effect of dynamic loss scaling for the given expression.
def undo_loss_scaling(self, value):
assert is_tf_expression(value)
if not self.use_loss_scaling:
return value
return value * exp2(-self.get_loss_scaling_var(value.device))
# ----------------------------------------------------------------------------
# Generic network abstraction.
#
# Acts as a convenience wrapper for a parameterized network construction
# function, providing several utility methods and convenient access to
# the inputs/outputs/weights.
#
# Network objects can be safely pickled and unpickled for long-term
# archival purposes. The pickling works reliably as long as the underlying
# network construction function is defined in a standalone Python module
# that has no side effects or application-specific imports.
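# Usage sketch (editor's addition; the build function name and latent size
# are hypothetical):
#   G = Network('G', func='networks.G_paper', resolution=128)
#   latents = np.random.randn(8, 512).astype(np.float32)
#   images = G.run(latents, minibatch_size=8)  # returns NumPy output(s)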
network_import_handlers = [] # Custom import handlers for dealing with legacy data in pickle import.
_network_import_modules = [] # Temporary modules create during pickle import.
class Network:
def __init__(self,
name=None, # Network name. Used to select TensorFlow name and variable scopes.
func=None, # Fully qualified name of the underlying network construction function.
**static_kwargs): # Keyword arguments to be passed in to the network construction function.
self._init_fields()
self.name = name
self.static_kwargs = dict(static_kwargs)
# Init build func.
module, self._build_func_name = import_module(func)
self._build_module_src = inspect.getsource(module)
self._build_func = find_obj_in_module(module, self._build_func_name)
# Init graph.
self._init_graph()
self.reset_vars()
def _init_fields(self):
self.name = None # User-specified name, defaults to build func name if None.
self.scope = None # Unique TF graph scope, derived from the user-specified name.
self.static_kwargs = dict() # Arguments passed to the user-supplied build func.
self.num_inputs = 0 # Number of input tensors.
self.num_outputs = 0 # Number of output tensors.
self.input_shapes = [[]] # Input tensor shapes (NC or NCHW), including minibatch dimension.
self.output_shapes = [[]] # Output tensor shapes (NC or NCHW), including minibatch dimension.
self.input_shape = [] # Short-hand for input_shapes[0].
self.output_shape = [] # Short-hand for output_shapes[0].
self.input_templates = [] # Input placeholders in the template graph.
self.output_templates = [] # Output tensors in the template graph.
self.input_names = [] # Name string for each input.
self.output_names = [] # Name string for each output.
self.vars = OrderedDict() # All variables (localname => var).
self.trainables = OrderedDict() # Trainable variables (localname => var).
self._build_func = None # User-supplied build function that constructs the network.
self._build_func_name = None # Name of the build function.
self._build_module_src = None # Full source code of the module containing the build function.
self._run_cache = dict() # Cached graph data for Network.run().
def _init_graph(self):
# Collect inputs.
self.input_names = []
for param in inspect.signature(self._build_func).parameters.values():
if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
self.input_names.append(param.name)
self.num_inputs = len(self.input_names)
assert self.num_inputs >= 1
# Choose name and scope.
if self.name is None:
self.name = self._build_func_name
self.scope = tf.get_default_graph().unique_name(self.name.replace('/', '_'), mark_as_used=False)
# Build template graph.
with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
assert tf.get_variable_scope().name == self.scope
with absolute_name_scope(self.scope): # ignore surrounding name_scope
with tf.control_dependencies(None): # ignore surrounding control_dependencies
self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
out_expr = self._build_func(*self.input_templates, is_template_graph=True, **self.static_kwargs)
# Collect outputs.
assert is_tf_expression(out_expr) or isinstance(out_expr, tuple)
self.output_templates = [out_expr] if is_tf_expression(out_expr) else list(out_expr)
self.output_names = [t.name.split('/')[-1].split(':')[0] for t in self.output_templates]
self.num_outputs = len(self.output_templates)
assert self.num_outputs >= 1
# Populate remaining fields.
self.input_shapes = [shape_to_list(t.shape) for t in self.input_templates]
self.output_shapes = [shape_to_list(t.shape) for t in self.output_templates]
self.input_shape = self.input_shapes[0]
self.output_shape = self.output_shapes[0]
self.vars = OrderedDict([(self.get_var_localname(var), var) for var in tf.global_variables(self.scope + '/')])
self.trainables = OrderedDict(
[(self.get_var_localname(var), var) for var in tf.trainable_variables(self.scope + '/')])
# Run initializers for all variables defined by this network.
def reset_vars(self):
run([var.initializer for var in self.vars.values()])
# Run initializers for all trainable variables defined by this network.
def reset_trainables(self):
run([var.initializer for var in self.trainables.values()])
# Get TensorFlow expression(s) for the output(s) of this network, given the inputs.
def get_output_for(self, *in_expr, return_as_list=False, **dynamic_kwargs):
assert len(in_expr) == self.num_inputs
all_kwargs = dict(self.static_kwargs)
all_kwargs.update(dynamic_kwargs)
with tf.variable_scope(self.scope, reuse=True):
assert tf.get_variable_scope().name == self.scope
named_inputs = [tf.identity(expr, name=name) for expr, name in zip(in_expr, self.input_names)]
out_expr = self._build_func(*named_inputs, **all_kwargs)
assert is_tf_expression(out_expr) or isinstance(out_expr, tuple)
if return_as_list:
out_expr = [out_expr] if is_tf_expression(out_expr) else list(out_expr)
return out_expr
# Get the local name of a given variable, excluding any surrounding name scopes.
def get_var_localname(self, var_or_globalname):
assert is_tf_expression(var_or_globalname) or isinstance(var_or_globalname, str)
globalname = var_or_globalname if isinstance(var_or_globalname, str) else var_or_globalname.name
assert globalname.startswith(self.scope + '/')
localname = globalname[len(self.scope) + 1:]
localname = localname.split(':')[0]
return localname
# Find variable by local or global name.
def find_var(self, var_or_localname):
assert is_tf_expression(var_or_localname) or isinstance(var_or_localname, str)
return self.vars[var_or_localname] if isinstance(var_or_localname, str) else var_or_localname
# Get the value of a given variable as NumPy array.
# Note: This method is very inefficient -- prefer to use tfutil.run(list_of_vars) whenever possible.
def get_var(self, var_or_localname):
return self.find_var(var_or_localname).eval()
# Set the value of a given variable based on the given NumPy array.
# Note: This method is very inefficient -- prefer to use tfutil.set_vars() whenever possible.
def set_var(self, var_or_localname, new_value):
return set_vars({self.find_var(var_or_localname): new_value})
# Pickle export.
def __getstate__(self):
return {
'version': 2,
'name': self.name,
'static_kwargs': self.static_kwargs,
'build_module_src': self._build_module_src,
'build_func_name': self._build_func_name,
'variables': list(zip(self.vars.keys(), run(list(self.vars.values()))))}
# Pickle import.
def __setstate__(self, state):
self._init_fields()
# Execute custom import handlers.
for handler in network_import_handlers:
state = handler(state)
# Set basic fields.
assert state['version'] == 2
self.name = state['name']
self.static_kwargs = state['static_kwargs']
self._build_module_src = state['build_module_src']
self._build_func_name = state['build_func_name']
# Parse imported module.
module = imp.new_module('_tfutil_network_import_module_%d' % len(_network_import_modules))
exec(self._build_module_src, module.__dict__)
self._build_func = find_obj_in_module(module, self._build_func_name)
_network_import_modules.append(module) # avoid gc
# Init graph.
self._init_graph()
self.reset_vars()
set_vars({self.find_var(name): value for name, value in state['variables']})
# Create a clone of this network with its own copy of the variables.
def clone(self, name=None):
net = object.__new__(Network)
net._init_fields()
net.name = name if name is not None else self.name
net.static_kwargs = dict(self.static_kwargs)
net._build_module_src = self._build_module_src
net._build_func_name = self._build_func_name
net._build_func = self._build_func
net._init_graph()
net.copy_vars_from(self)
return net
# Copy the values of all variables from the given network.
def copy_vars_from(self, src_net):
assert isinstance(src_net, Network)
name_to_value = run({name: src_net.find_var(name) for name in self.vars.keys()})
set_vars({self.find_var(name): value for name, value in name_to_value.items()})
# Copy the values of all trainable variables from the given network.
def copy_trainables_from(self, src_net):
assert isinstance(src_net, Network)
name_to_value = run({name: src_net.find_var(name) for name in self.trainables.keys()})
set_vars({self.find_var(name): value for name, value in name_to_value.items()})
# Create new network with the given parameters, and copy all variables from this network.
def convert(self, name=None, func=None, **static_kwargs):
net = Network(name, func, **static_kwargs)
net.copy_vars_from(self)
return net
# Construct a TensorFlow op that updates the variables of this network
# to be slightly closer to those of the given network.
def setup_as_moving_average_of(self, src_net, beta=0.99, beta_nontrainable=0.0):
assert isinstance(src_net, Network)
with absolute_name_scope(self.scope):
with tf.name_scope('MovingAvg'):
ops = []
for name, var in self.vars.items():
if name in src_net.vars:
cur_beta = beta if name in self.trainables else beta_nontrainable
new_value = lerp(src_net.vars[name], var, cur_beta)
ops.append(var.assign(new_value))
return tf.group(*ops)
# Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s).
def run(self, *in_arrays,
return_as_list=False,
# True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
print_progress=False, # Print progress to the console? Useful for very large input arrays.
minibatch_size=None, # Maximum minibatch size to use, None = disable batching.
num_gpus=1, # Number of GPUs to use.
out_mul=1.0, # Multiplicative constant to apply to the output(s).
out_add=0.0, # Additive constant to apply to the output(s).
out_shrink=1, # Shrink the spatial dimensions of the output(s) by the given factor.
out_dtype=None, # Convert the output to the specified data type.
**dynamic_kwargs): # Additional keyword arguments to pass into the network construction function.
assert len(in_arrays) == self.num_inputs
num_items = in_arrays[0].shape[0]
if minibatch_size is None:
minibatch_size = num_items
key = str([list(sorted(dynamic_kwargs.items())), num_gpus, out_mul, out_add, out_shrink, out_dtype])
# Build graph.
if key not in self._run_cache:
with absolute_name_scope(self.scope + '/Run'), tf.control_dependencies(None):
in_split = list(zip(*[tf.split(x, num_gpus) for x in self.input_templates]))
out_split = []
for gpu in range(num_gpus):
with tf.device('/gpu:%d' % gpu):
out_expr = self.get_output_for(*in_split[gpu], return_as_list=True, **dynamic_kwargs)
if out_mul != 1.0:
out_expr = [x * out_mul for x in out_expr]
if out_add != 0.0:
out_expr = [x + out_add for x in out_expr]
if out_shrink > 1:
ksize = [1, 1, out_shrink, out_shrink]
out_expr = [
tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') for x
in out_expr]
if out_dtype is not None:
if tf.as_dtype(out_dtype).is_integer:
out_expr = [tf.round(x) for x in out_expr]
out_expr = [tf.saturate_cast(x, out_dtype) for x in out_expr]
out_split.append(out_expr)
self._run_cache[key] = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]
# Run minibatches.
out_expr = self._run_cache[key]
out_arrays = [np.empty([num_items] + shape_to_list(expr.shape)[1:], expr.dtype.name) for expr in out_expr]
for mb_begin in range(0, num_items, minibatch_size):
if print_progress:
print('\r%d / %d' % (mb_begin, num_items), end='')
mb_end = min(mb_begin + minibatch_size, num_items)
mb_in = [src[mb_begin: mb_end] for src in in_arrays]
mb_out = tf.get_default_session().run(out_expr, dict(zip(self.input_templates, mb_in)))
for dst, src in zip(out_arrays, mb_out):
dst[mb_begin: mb_end] = src
# Done.
if print_progress:
print('\r%d / %d' % (num_items, num_items))
if not return_as_list:
out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays)
return out_arrays
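# Usage sketch (illustrative, not part of the original file): generating
# uint8 images from a trained conditional generator Gs, mirroring how the
# inference scripts below call this method (label_size is hypothetical):
#
#   latents = misc.random_latents(8, Gs)
#   labels = np.zeros([8, label_size], np.float32)
#   images = Gs.run(latents, labels, minibatch_size=4,
#                   out_mul=127.5, out_add=127.5, out_dtype=np.uint8)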
# Returns a list of (name, output_expr, trainable_vars) tuples corresponding to
# individual layers of the network. Mainly intended to be used for reporting.
def list_layers(self):
patterns_to_ignore = ['/Setter', '/new_value', '/Shape', '/strided_slice', '/Cast', '/concat']
all_ops = tf.get_default_graph().get_operations()
all_ops = [op for op in all_ops if not any(p in op.name for p in patterns_to_ignore)]
layers = []
def recurse(scope, parent_ops, level):
prefix = scope + '/'
ops = [op for op in parent_ops if op.name == scope or op.name.startswith(prefix)]
# Does not contain leaf nodes => expand immediate children.
if level == 0 or all('/' in op.name[len(prefix):] for op in ops):
visited = set()
for op in ops:
suffix = op.name[len(prefix):]
if '/' in suffix:
suffix = suffix[:suffix.index('/')]
if suffix not in visited:
recurse(prefix + suffix, ops, level + 1)
visited.add(suffix)
# Otherwise => interpret as a layer.
else:
layer_name = scope[len(self.scope) + 1:]
layer_output = ops[-1].outputs[0]
layer_trainables = [op.outputs[0] for op in ops if
op.type.startswith('Variable') and self.get_var_localname(
op.name) in self.trainables]
layers.append((layer_name, layer_output, layer_trainables))
recurse(self.scope, all_ops, 0)
return layers
# Print a summary table of the network structure.
def print_layers(self, title=None, hide_layers_with_no_params=False):
if title is None: title = self.name
print()
print('%-28s%-12s%-24s%-24s' % (title, 'Params', 'OutputShape', 'WeightShape'))
print('%-28s%-12s%-24s%-24s' % (('---',) * 4))
total_params = 0
for layer_name, layer_output, layer_trainables in self.list_layers():
weights = [var for var in layer_trainables if var.name.endswith('/weight:0')]
num_params = sum(np.prod(shape_to_list(var.shape)) for var in layer_trainables)
total_params += num_params
if hide_layers_with_no_params and num_params == 0:
continue
print('%-28s%-12s%-24s%-24s' % (
layer_name,
num_params if num_params else '-',
layer_output.shape,
weights[0].shape if len(weights) == 1 else '-'))
print('%-28s%-12s%-24s%-24s' % (('---',) * 4))
print('%-28s%-12s%-24s%-24s' % ('Total', total_params, '', ''))
print()
# Construct summary ops to include histograms of all trainable parameters in TensorBoard.
def setup_weight_histograms(self, title=None):
if title is None: title = self.name
with tf.name_scope(None), tf.device(None), tf.control_dependencies(None):
for localname, var in self.trainables.items():
if '/' in localname:
p = localname.split('/')
name = title + '_' + p[-1] + '/' + '_'.join(p[:-1])
else:
name = title + '_toplevel/' + localname
tf.summary.histogram(name, var)
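# Usage sketch (illustrative): after calling setup_weight_histograms(), the
# histograms can be written out with standard TF1 summary plumbing (log_dir
# and step are hypothetical):
#
#   net.setup_weight_histograms()
#   merged = tf.summary.merge_all()
#   writer = tf.summary.FileWriter(log_dir)
#   writer.add_summary(tf.get_default_session().run(merged), global_step=step)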
#----------------------------------------------------------------------------
# Receives an image tensor (NCHW), converts it to NHWC at 224x224 resolution,
# and extracts features from the last convolutional layer of a pre-trained
# VGG network stored as a frozen graph at './models/vgg_tf.pb'.
def find_vgg_features_tf(self, img):
input_img = tf.transpose(img, [0, 3, 2, 1])
input_img = tf.transpose(input_img, [0, 2, 1, 3])
input_img = tf.image.resize_images(input_img, (224, 224))
with tf.gfile.FastGFile('./models/vgg_tf.pb', 'rb') as model_file:
graph_def = tf.GraphDef()
graph_def.ParseFromString(model_file.read())
output = tf.import_graph_def(graph_def, input_map={'zero_padding2d_1_input:0': input_img},
return_elements=['conv2d_16/BiasAdd:0'], name='')
return output[0]
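# Usage sketch (illustrative): a VGG-based perceptual distance between two
# NCHW image batches a and b, assuming './models/vgg_tf.pb' is available;
# this mirrors how the function is used in reverse_gan_for_etalons below:
#
#   vgg_loss = tf.losses.mean_squared_error(
#       labels=net.find_vgg_features_tf(a),
#       predictions=net.find_vgg_features_tf(b))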
# This function takes everything that run() takes, plus etalon images, and
# finds the latents that approximate those etalons.
# Usage: Gs.reverse_gan_for_etalons(latents, labels, etalons, ...)
# where etalons.shape is e.g. (?, 1024, 1024, 3) with values in [-1, 1].
# Returns the history of latents, with the last solution being the best.
def reverse_gan_for_etalons(self,
*in_arrays, # Expects start values of latents, any labels and etalon images.
results_dir, # Source directory to import the generator from.
dest_dir, # Target directory to save the outputs.
iters, # Number of optimisation iterations.
learning_rate, # Initial learning rate.
alpha, # Weight of the pixel-space MSE loss relative to the VGG feature loss.
iterations_to_save=2000,
stochastic_clipping = True,
return_as_list = False, # True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
print_progress = False, # Print progress to the console? Useful for very large input arrays.
minibatch_size = None, # Maximum minibatch size to use, None = disable batching.
num_gpus = 1, # Number of GPUs to use.
out_mul = 1.0, # Multiplicative constant to apply to the output(s).
out_add = 0.0, # Additive constant to apply to the output(s).
out_shrink = 1, # Shrink the spatial dimensions of the output(s) by the given factor.
out_dtype = None, # Convert the output to the specified data type.
**dynamic_kwargs): # Additional keyword arguments to pass into the network construction function.
assert len(in_arrays) == 3
num_items = in_arrays[0].shape[0]
if minibatch_size is None:
minibatch_size = num_items
key = str([list(sorted(dynamic_kwargs.items())),
num_gpus,
out_mul,
out_add,
out_shrink,
out_dtype])
# Build the graph. Same as in the run() function above.
if key not in self._run_cache:
with absolute_name_scope(self.scope + '/Run'), tf.control_dependencies(None):
in_split = list(zip(*[tf.split(x, num_gpus) for x in self.input_templates]))
out_split = []
for gpu in range(num_gpus):
with tf.device('/gpu:%d' % gpu):
out_expr = self.get_output_for(*in_split[gpu],
return_as_list=True,
**dynamic_kwargs)
if out_mul != 1.0:
out_expr = [x * out_mul for x in out_expr]
if out_add != 0.0:
out_expr = [x + out_add for x in out_expr]
if out_shrink > 1:
ksize = [1, 1, out_shrink, out_shrink]
out_expr = [tf.nn.avg_pool(x,
ksize=ksize,
strides=ksize,
padding='VALID',
data_format='NCHW') for x in out_expr]
if out_dtype is not None:
if tf.as_dtype(out_dtype).is_integer:
out_expr = [tf.round(x) for x in out_expr]
out_expr = [tf.saturate_cast(x, out_dtype) for x in out_expr]
out_split.append(out_expr)
self._run_cache[key] = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]
# Output tensor and GT tensor
out_expr = self._run_cache[key]
psy_name = str(self.scope + '/etalon')
psy = tf.placeholder(tf.float32, out_expr[0].shape, name=psy_name)
# Loss function: alpha*MSELoss + (1-alpha)*VGGLoss
loss = alpha * tf.losses.mean_squared_error(labels=psy, predictions=out_expr[0]) \
    + (1 - alpha) * tf.losses.mean_squared_error(labels=self.find_vgg_features_tf(psy),
                                                 predictions=self.find_vgg_features_tf(out_expr[0]))
latents_name = self.input_templates[0].name
input_latents = tf.get_default_graph().get_tensor_by_name(latents_name)
labels_name = self.input_templates[1].name
input_labels = tf.get_default_graph().get_tensor_by_name(labels_name)
# Gradients computation
latents_gradient = tf.gradients(loss, input_latents)
labels_gradient = tf.gradients(loss, input_labels)
gradient = tf.concat([latents_gradient, labels_gradient], 2)
# We extend the existing input templates to feed etalons
# into the loss and gradient tensors:
templ = self.input_templates
templ.append(psy)
# Create a new feed dictionary:
feed_dict = dict(zip(templ, in_arrays))
# Set up the loss and the gradient together with their feed dictionary.
l_rate = learning_rate
latents = in_arrays[0]
labels = in_arrays[1]
samples_num = latents.shape[0]
# For recording the history of iterations.
history = []
c_min = 1e+9
x_min = None
y_min = None
G, D, Gs = misc.load_network_pkl(results_dir, None)
# Here is the main optimisation logic. Stochastic clipping is
# from 'Precise Recovery of Latent Vectors from Generative
# Adversarial Networks', ICLR 2017 workshop track:
# https://arxiv.org/abs/1702.04782
for i in range(iters):
g = tf.get_default_session().run(
[loss, gradient],
feed_dict=feed_dict)
# Split the concatenated gradient back into its latent part (first 512
# entries) and its label part:
g_latents = np.expand_dims(g[1][0][0][:512], 0)
g_labels = np.expand_dims(g[1][0][0][512:], 0)
latents = latents - l_rate * g_latents
labels = labels - l_rate * g_labels
if stochastic_clipping:
# Stochastic clipping: resample out-of-range entries uniformly
for j in range(samples_num):
edge1 = np.where(latents[j] >= 1.)[0]
edge2 = np.where(latents[j] <= -1)[0]
if edge1.shape[0] > 0:
rand_el1 = np.random.uniform(-1, 1, size=(1, edge1.shape[0]))
latents[j, edge1] = rand_el1
if edge2.shape[0] > 0:
rand_el2 = np.random.uniform(-1, 1, size=(1, edge2.shape[0]))
latents[j, edge2] = rand_el2
edge1 = np.where(labels[j] > 1.)[0]
edge2 = np.where(labels[j] < 0.)[0]
if edge1.shape[0] > 0:
rand_el1 = np.random.uniform(-1, 1, size=(1, edge1.shape[0]))
labels[j, edge1] = rand_el1
if edge2.shape[0] > 0:
rand_el2 = np.random.uniform(-1, 1, size=(1, edge2.shape[0]))
labels[j, edge2] = rand_el2
else:
# Standard clipping
latents = np.clip(latents, -1, 1)
labels = np.clip(labels, 0, 1)
# Updating the feed dictionary for the next iteration.
feed_dict[input_latents] = latents
feed_dict[input_labels] = labels
if g[0] < c_min:
# Saving the best latents and labels
c_min = g[0]
x_min = latents
y_min = labels
if i % 50 == 0 and i != 0:
# Decay the learning rate on a fixed schedule (at i == 1000 and every 20000 iterations)
if i == 1000:
l_rate /= 5
if i % 20000 == 0:
l_rate /= 2
# And record the history
history.append((g[0], latents))
print(i, g[0]/samples_num)
print(labels)
print(labels.mean())
if i % iterations_to_save == 0 and i > 0:
print("saving reconstruction output for iteration num {}".format(i))
iteration_name = 'best_restored_latent_vector_' + str(i) + '.npy'
np.save(os.path.join(dest_dir, iteration_name), x_min)
for k in range(10):
y_pred = y_min
y_pred = y_pred + (k*0.05)
# Run the generator on the recovered latents to obtain an image
image = Gs.run(x_min, y_pred, minibatch_size=1, num_gpus=1, out_mul=127.5, out_add=127.5, out_shrink=1, out_dtype=np.uint8)
# Save the generated image grid as '<iteration>_<k>.png'
misc.save_image_grid(image, os.path.join(dest_dir, '{}_{}.png'.format('%04d' % i, k)), [0, 255], [1, 1])
# We return back the optimisation history of latents
history.append((c_min, x_min))
return history
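# A minimal standalone sketch of the stochastic clipping step above (pure
# NumPy, hypothetical helper): out-of-range entries are resampled uniformly
# from [lo, hi] instead of being saturated at the bounds:
#
#   def stochastic_clip(z, lo=-1.0, hi=1.0):
#       z = z.copy()
#       mask = (z <= lo) | (z >= hi)
#       z[mask] = np.random.uniform(lo, hi, size=mask.sum())
#       return z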
#----------------------------------------------------------------------------
| 66,879 | 46.131783 | 226 | py |
Beholder-GAN | Beholder-GAN-master/legacy.py | #Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
#
#Attribution-NonCommercial 4.0 International
#
#=======================================================================
#
#Creative Commons Corporation ("Creative Commons") is not a law firm and
#does not provide legal services or legal advice. Distribution of
#Creative Commons public licenses does not create a lawyer-client or
#other relationship. Creative Commons makes its licenses and related
#information available on an "as-is" basis. Creative Commons gives no
#warranties regarding its licenses, any material licensed under their
#terms and conditions, or any related information. Creative Commons
#disclaims all liability for damages resulting from their use to the
#fullest extent possible.
#
#Using Creative Commons Public Licenses
#
#Creative Commons public licenses provide a standard set of terms and
#conditions that creators and other rights holders may use to share
#original works of authorship and other material subject to copyright
#and certain other rights specified in the public license below. The
#following considerations are for informational purposes only, are not
#exhaustive, and do not form part of our licenses.
#
# Considerations for licensors: Our public licenses are
# intended for use by those authorized to give the public
# permission to use material in ways otherwise restricted by
# copyright and certain other rights. Our licenses are
# irrevocable. Licensors should read and understand the terms
# and conditions of the license they choose before applying it.
# Licensors should also secure all rights necessary before
# applying our licenses so that the public can reuse the
# material as expected. Licensors should clearly mark any
# material not subject to the license. This includes other CC-
# licensed material, or material used under an exception or
# limitation to copyright. More considerations for licensors:
# wiki.creativecommons.org/Considerations_for_licensors
#
# Considerations for the public: By using one of our public
# licenses, a licensor grants the public permission to use the
# licensed material under specified terms and conditions. If
# the licensor's permission is not necessary for any reason--for
# example, because of any applicable exception or limitation to
# copyright--then that use is not regulated by the license. Our
# licenses grant only permissions under copyright and certain
# other rights that a licensor has authority to grant. Use of
# the licensed material may still be restricted for other
# reasons, including because others have copyright or other
# rights in the material. A licensor may make special requests,
# such as asking that all changes be marked or described.
# Although not required by our licenses, you are encouraged to
# respect those requests where reasonable. More_considerations
# for the public:
# wiki.creativecommons.org/Considerations_for_licensees
#
#=======================================================================
#
#Creative Commons Attribution-NonCommercial 4.0 International Public
#License
#
#By exercising the Licensed Rights (defined below), You accept and agree
#to be bound by the terms and conditions of this Creative Commons
#Attribution-NonCommercial 4.0 International Public License ("Public
#License"). To the extent this Public License may be interpreted as a
#contract, You are granted the Licensed Rights in consideration of Your
#acceptance of these terms and conditions, and the Licensor grants You
#such rights in consideration of benefits the Licensor receives from
#making the Licensed Material available under these terms and
#conditions.
#
#
#Section 1 -- Definitions.
#
# a. Adapted Material means material subject to Copyright and Similar
# Rights that is derived from or based upon the Licensed Material
# and in which the Licensed Material is translated, altered,
# arranged, transformed, or otherwise modified in a manner requiring
# permission under the Copyright and Similar Rights held by the
# Licensor. For purposes of this Public License, where the Licensed
# Material is a musical work, performance, or sound recording,
# Adapted Material is always produced where the Licensed Material is
# synched in timed relation with a moving image.
#
# b. Adapter's License means the license You apply to Your Copyright
# and Similar Rights in Your contributions to Adapted Material in
# accordance with the terms and conditions of this Public License.
#
# c. Copyright and Similar Rights means copyright and/or similar rights
# closely related to copyright including, without limitation,
# performance, broadcast, sound recording, and Sui Generis Database
# Rights, without regard to how the rights are labeled or
# categorized. For purposes of this Public License, the rights
# specified in Section 2(b)(1)-(2) are not Copyright and Similar
# Rights.
# d. Effective Technological Measures means those measures that, in the
# absence of proper authority, may not be circumvented under laws
# fulfilling obligations under Article 11 of the WIPO Copyright
# Treaty adopted on December 20, 1996, and/or similar international
# agreements.
#
# e. Exceptions and Limitations means fair use, fair dealing, and/or
# any other exception or limitation to Copyright and Similar Rights
# that applies to Your use of the Licensed Material.
#
# f. Licensed Material means the artistic or literary work, database,
# or other material to which the Licensor applied this Public
# License.
#
# g. Licensed Rights means the rights granted to You subject to the
# terms and conditions of this Public License, which are limited to
# all Copyright and Similar Rights that apply to Your use of the
# Licensed Material and that the Licensor has authority to license.
#
# h. Licensor means the individual(s) or entity(ies) granting rights
# under this Public License.
#
# i. NonCommercial means not primarily intended for or directed towards
# commercial advantage or monetary compensation. For purposes of
# this Public License, the exchange of the Licensed Material for
# other material subject to Copyright and Similar Rights by digital
# file-sharing or similar means is NonCommercial provided there is
# no payment of monetary compensation in connection with the
# exchange.
#
# j. Share means to provide material to the public by any means or
# process that requires permission under the Licensed Rights, such
# as reproduction, public display, public performance, distribution,
# dissemination, communication, or importation, and to make material
# available to the public including in ways that members of the
# public may access the material from a place and at a time
# individually chosen by them.
#
# k. Sui Generis Database Rights means rights other than copyright
# resulting from Directive 96/9/EC of the European Parliament and of
# the Council of 11 March 1996 on the legal protection of databases,
# as amended and/or succeeded, as well as other essentially
# equivalent rights anywhere in the world.
#
# l. You means the individual or entity exercising the Licensed Rights
# under this Public License. Your has a corresponding meaning.
#
#
#Section 2 -- Scope.
#
# a. License grant.
#
# 1. Subject to the terms and conditions of this Public License,
# the Licensor hereby grants You a worldwide, royalty-free,
# non-sublicensable, non-exclusive, irrevocable license to
# exercise the Licensed Rights in the Licensed Material to:
#
# a. reproduce and Share the Licensed Material, in whole or
# in part, for NonCommercial purposes only; and
#
# b. produce, reproduce, and Share Adapted Material for
# NonCommercial purposes only.
#
# 2. Exceptions and Limitations. For the avoidance of doubt, where
# Exceptions and Limitations apply to Your use, this Public
# License does not apply, and You do not need to comply with
# its terms and conditions.
#
# 3. Term. The term of this Public License is specified in Section
# 6(a).
#
# 4. Media and formats; technical modifications allowed. The
# Licensor authorizes You to exercise the Licensed Rights in
# all media and formats whether now known or hereafter created,
# and to make technical modifications necessary to do so. The
# Licensor waives and/or agrees not to assert any right or
# authority to forbid You from making technical modifications
# necessary to exercise the Licensed Rights, including
# technical modifications necessary to circumvent Effective
# Technological Measures. For purposes of this Public License,
# simply making modifications authorized by this Section 2(a)
# (4) never produces Adapted Material.
#
# 5. Downstream recipients.
#
# a. Offer from the Licensor -- Licensed Material. Every
# recipient of the Licensed Material automatically
# receives an offer from the Licensor to exercise the
# Licensed Rights under the terms and conditions of this
# Public License.
#
# b. No downstream restrictions. You may not offer or impose
# any additional or different terms or conditions on, or
# apply any Effective Technological Measures to, the
# Licensed Material if doing so restricts exercise of the
# Licensed Rights by any recipient of the Licensed
# Material.
#
# 6. No endorsement. Nothing in this Public License constitutes or
# may be construed as permission to assert or imply that You
# are, or that Your use of the Licensed Material is, connected
# with, or sponsored, endorsed, or granted official status by,
# the Licensor or others designated to receive attribution as
# provided in Section 3(a)(1)(A)(i).
#
# b. Other rights.
#
# 1. Moral rights, such as the right of integrity, are not
# licensed under this Public License, nor are publicity,
# privacy, and/or other similar personality rights; however, to
# the extent possible, the Licensor waives and/or agrees not to
# assert any such rights held by the Licensor to the limited
# extent necessary to allow You to exercise the Licensed
# Rights, but not otherwise.
#
# 2. Patent and trademark rights are not licensed under this
# Public License.
#
# 3. To the extent possible, the Licensor waives any right to
# collect royalties from You for the exercise of the Licensed
# Rights, whether directly or through a collecting society
# under any voluntary or waivable statutory or compulsory
# licensing scheme. In all other cases the Licensor expressly
# reserves any right to collect such royalties, including when
# the Licensed Material is used other than for NonCommercial
# purposes.
#
#
#Section 3 -- License Conditions.
#
#Your exercise of the Licensed Rights is expressly made subject to the
#following conditions.
#
# a. Attribution.
#
# 1. If You Share the Licensed Material (including in modified
# form), You must:
#
# a. retain the following if it is supplied by the Licensor
# with the Licensed Material:
#
# i. identification of the creator(s) of the Licensed
# Material and any others designated to receive
# attribution, in any reasonable manner requested by
# the Licensor (including by pseudonym if
# designated);
#
# ii. a copyright notice;
#
# iii. a notice that refers to this Public License;
#
# iv. a notice that refers to the disclaimer of
# warranties;
#
# v. a URI or hyperlink to the Licensed Material to the
# extent reasonably practicable;
#
# b. indicate if You modified the Licensed Material and
# retain an indication of any previous modifications; and
#
# c. indicate the Licensed Material is licensed under this
# Public License, and include the text of, or the URI or
# hyperlink to, this Public License.
#
# 2. You may satisfy the conditions in Section 3(a)(1) in any
# reasonable manner based on the medium, means, and context in
# which You Share the Licensed Material. For example, it may be
# reasonable to satisfy the conditions by providing a URI or
# hyperlink to a resource that includes the required
# information.
#
# 3. If requested by the Licensor, You must remove any of the
# information required by Section 3(a)(1)(A) to the extent
# reasonably practicable.
#
# 4. If You Share Adapted Material You produce, the Adapter's
# License You apply must not prevent recipients of the Adapted
# Material from complying with this Public License.
#
#
#Section 4 -- Sui Generis Database Rights.
#
#Where the Licensed Rights include Sui Generis Database Rights that
#apply to Your use of the Licensed Material:
#
# a. for the avoidance of doubt, Section 2(a)(1) grants You the right
# to extract, reuse, reproduce, and Share all or a substantial
# portion of the contents of the database for NonCommercial purposes
# only;
#
# b. if You include all or a substantial portion of the database
# contents in a database in which You have Sui Generis Database
# Rights, then the database in which You have Sui Generis Database
# Rights (but not its individual contents) is Adapted Material; and
#
# c. You must comply with the conditions in Section 3(a) if You Share
# all or a substantial portion of the contents of the database.
#
#For the avoidance of doubt, this Section 4 supplements and does not
#replace Your obligations under this Public License where the Licensed
#Rights include other Copyright and Similar Rights.
#
#
#Section 5 -- Disclaimer of Warranties and Limitation of Liability.
#
# a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
# EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
# AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
# ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
# IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
# WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
# ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
# KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
# ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
#
# b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
# TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
# NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
# INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
# COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
# USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
# ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
# DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
# IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
#
# c. The disclaimer of warranties and limitation of liability provided
# above shall be interpreted in a manner that, to the extent
# possible, most closely approximates an absolute disclaimer and
# waiver of all liability.
#
#
#Section 6 -- Term and Termination.
#
# a. This Public License applies for the term of the Copyright and
# Similar Rights licensed here. However, if You fail to comply with
# this Public License, then Your rights under this Public License
# terminate automatically.
#
# b. Where Your right to use the Licensed Material has terminated under
# Section 6(a), it reinstates:
#
# 1. automatically as of the date the violation is cured, provided
# it is cured within 30 days of Your discovery of the
# violation; or
#
# 2. upon express reinstatement by the Licensor.
#
# For the avoidance of doubt, this Section 6(b) does not affect any
# right the Licensor may have to seek remedies for Your violations
# of this Public License.
#
# c. For the avoidance of doubt, the Licensor may also offer the
# Licensed Material under separate terms or conditions or stop
# distributing the Licensed Material at any time; however, doing so
# will not terminate this Public License.
#
# d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
# License.
#
#
#Section 7 -- Other Terms and Conditions.
#
# a. The Licensor shall not be bound by any additional or different
# terms or conditions communicated by You unless expressly agreed.
#
# b. Any arrangements, understandings, or agreements regarding the
# Licensed Material not stated herein are separate from and
# independent of the terms and conditions of this Public License.
#
#
#Section 8 -- Interpretation.
#
# a. For the avoidance of doubt, this Public License does not, and
# shall not be interpreted to, reduce, limit, restrict, or impose
# conditions on any use of the Licensed Material that could lawfully
# be made without permission under this Public License.
#
# b. To the extent possible, if any provision of this Public License is
# deemed unenforceable, it shall be automatically reformed to the
# minimum extent necessary to make it enforceable. If the provision
# cannot be reformed, it shall be severed from this Public License
# without affecting the enforceability of the remaining terms and
# conditions.
#
# c. No term or condition of this Public License will be waived and no
# failure to comply consented to unless expressly agreed to by the
# Licensor.
#
# d. Nothing in this Public License constitutes or may be interpreted
# as a limitation upon, or waiver of, any privileges and immunities
# that apply to the Licensor or You, including from the legal
# processes of any jurisdiction or authority.
#
#=======================================================================
#
#Creative Commons is not a party to its public
#licenses. Notwithstanding, Creative Commons may elect to apply one of
#its public licenses to material it publishes and in those instances
#will be considered the "Licensor." The text of the Creative Commons
#public licenses is dedicated to the public domain under the CC0 Public
#Domain Dedication. Except for the limited purpose of indicating that
#material is shared under a Creative Commons public license or as
#otherwise permitted by the Creative Commons policies published at
#creativecommons.org/policies, Creative Commons does not authorize the
#use of the trademark "Creative Commons" or any other trademark or logo
#of Creative Commons without its prior written consent including,
#without limitation, in connection with any unauthorized modifications
#to any of its public licenses or any other arrangements,
#understandings, or agreements concerning use of licensed material. For
#the avoidance of doubt, this paragraph does not form part of the
#public licenses.
#
#Creative Commons may be contacted at creativecommons.org.
import pickle
import inspect
import numpy as np
import tfutil
import networks
#----------------------------------------------------------------------------
# Custom unpickler that is able to load network pickles produced by
# the old Theano implementation.
class LegacyUnpickler(pickle.Unpickler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def find_class(self, module, name):
if module == 'network' and name == 'Network':
return tfutil.Network
return super().find_class(module, name)
#----------------------------------------------------------------------------
# Import handler for tfutil.Network that silently converts networks produced
# by the old Theano implementation to a suitable format.
theano_gan_remap = {
'G_paper': 'G_paper',
'G_progressive_8': 'G_paper',
'D_paper': 'D_paper',
'D_progressive_8': 'D_paper'}
def patch_theano_gan(state):
if 'version' in state or state['build_func_spec']['func'] not in theano_gan_remap:
return state
spec = dict(state['build_func_spec'])
func = spec.pop('func')
resolution = spec.get('resolution', 32)
resolution_log2 = int(np.log2(resolution))
use_wscale = spec.get('use_wscale', True)
assert spec.pop('label_size', 0) == 0
assert spec.pop('use_batchnorm', False) == False
assert spec.pop('tanh_at_end', None) is None
assert spec.pop('mbstat_func', 'Tstdeps') == 'Tstdeps'
assert spec.pop('mbstat_avg', 'all') == 'all'
assert spec.pop('mbdisc_kernels', None) is None
spec.pop( 'use_gdrop', True) # doesn't make a difference
assert spec.pop('use_layernorm', False) == False
spec[ 'fused_scale'] = False
spec[ 'mbstd_group_size'] = 16
vars = []
param_iter = iter(state['param_values'])
relu = np.sqrt(2); linear = 1.0
def flatten2(w): return w.reshape(w.shape[0], -1)
def he_std(gain, w): return gain / np.sqrt(np.prod(w.shape[:-1]))
def wscale(gain, w): return w * next(param_iter) / he_std(gain, w) if use_wscale else w
def layer(name, gain, w): return [(name + '/weight', wscale(gain, w)), (name + '/bias', next(param_iter))]
if func.startswith('G'):
vars += layer('4x4/Dense', relu/4, flatten2(next(param_iter).transpose(1,0,2,3)))
vars += layer('4x4/Conv', relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
for res in range(3, resolution_log2 + 1):
vars += layer('%dx%d/Conv0' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
vars += layer('%dx%d/Conv1' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
for lod in range(0, resolution_log2 - 1):
vars += layer('ToRGB_lod%d' % lod, linear, next(param_iter)[np.newaxis, np.newaxis])
if func.startswith('D'):
vars += layer('FromRGB_lod0', relu, next(param_iter)[np.newaxis, np.newaxis])
for res in range(resolution_log2, 2, -1):
vars += layer('%dx%d/Conv0' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
vars += layer('%dx%d/Conv1' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
vars += layer('FromRGB_lod%d' % (resolution_log2 - (res - 1)), relu, next(param_iter)[np.newaxis, np.newaxis])
vars += layer('4x4/Conv', relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
vars += layer('4x4/Dense0', relu, flatten2(next(param_iter)[:,:,::-1,::-1]).transpose())
vars += layer('4x4/Dense1', linear, next(param_iter))
vars += [('lod', state['toplevel_params']['cur_lod'])]
return {
'version': 2,
'name': func,
'build_module_src': inspect.getsource(networks),
'build_func_name': theano_gan_remap[func],
'static_kwargs': spec,
'variables': vars}
tfutil.network_import_handlers.append(patch_theano_gan)
#----------------------------------------------------------------------------
# Import handler for tfutil.Network that ignores unsupported/deprecated
# networks produced by older versions of the code.
def ignore_unknown_theano_network(state):
if 'version' in state:
return state
print('Ignoring unknown Theano network:', state['build_func_spec']['func'])
return {
'version': 2,
'name': 'Dummy',
'build_module_src': 'def dummy(input, **kwargs): input.set_shape([None, 1]); return input',
'build_func_name': 'dummy',
'static_kwargs': {},
'variables': []}
tfutil.network_import_handlers.append(ignore_unknown_theano_network)
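#----------------------------------------------------------------------------
# Usage sketch (illustrative): misc.load_network_pkl() drives the import
# handlers registered above; a direct call might look like this (the pickle
# path is hypothetical, and old ProGAN snapshots store a (G, D, Gs) tuple):
#
#   with open('network-snapshot.pkl', 'rb') as f:
#       G, D, Gs = LegacyUnpickler(f, encoding='latin1').load()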
#----------------------------------------------------------------------------
| 24,724 | 46.275335 | 122 | py |
Beholder-GAN | Beholder-GAN-master/inference_cond.py | import os
import misc
import numpy as np
import pdb
from config import EasyDict
import tfutil
import argparse
# initialize parser arguments
parser = argparse.ArgumentParser()
parser.add_argument('--results_dir', '-results_dir', help='name of training experiment folder', default='dean_cond_batch16', type=str)
parser.add_argument('--outputs', '-outputs', help='how many sequences to print', default=500, type=int)
parser.add_argument('--labels_size', '-labels_size', help='size of labels vector', default=60, type=int)
parser.add_argument('--beauty_levels', '-beauty_levels', help='number of possible beauty levels', default=5, type=int)
parser.add_argument('--total_agreement', dest='total_agreement', help='all voters agreed on same beauty level',action='store_true')
parser.add_argument('--classification', dest='classification', help='if set, use classification conditioning instead of the original', action='store_true')
args = parser.parse_args()
# manual parameters
result_subdir = misc.create_result_subdir('results', 'inference_test')
misc.init_output_logging()
# initialize TensorFlow
print('Initializing TensorFlow...')
env = EasyDict() # Environment variables, set by the main program in train.py.
env.TF_CPP_MIN_LOG_LEVEL = '1' # Print warnings and errors, but disable debug info.
env.CUDA_VISIBLE_DEVICES = '1' # Unspecified (default) = Use all available GPUs. List of ints = CUDA device numbers to use. change to '0' if first GPU is better
os.environ.update(env)
tf_config = EasyDict() # TensorFlow session config, set by tfutil.init_tf().
tf_config['graph_options.place_pruned_graph'] = True # False (default) = Check that all ops are available on the designated device.
tfutil.init_tf(tf_config)
#load network
network_pkl = misc.locate_network_pkl(args.results_dir)
print('Loading network from "%s"...' % network_pkl)
G, D, Gs = misc.load_network_pkl(args.results_dir, None)
# sample <args.output> sequences
for j in range(args.outputs):
# change the random seed in every iteration
np.random.seed(j)
# generate random noise
latents = misc.random_latents(1, Gs, random_state=np.random.RandomState(j))
# If classification is requested, condition on one-hot classification vectors
if args.classification:
for i in range(args.labels_size):
# Initialise the one-hot conditioning label
labels = np.zeros([1, args.labels_size], np.float32)
labels[0][i] = 1.0
# Run the generator on the conditioned noise to obtain an image
image = Gs.run(latents, labels, minibatch_size=1, num_gpus=1, out_mul=127.5, out_add=127.5, out_shrink=1, out_dtype=np.uint8)
# Save the generated image as '<j>_<i>.png'
misc.save_image_grid(image, os.path.join(result_subdir, '{}_{}.png'.format('%04d' % j,i)), [0,255], [1,1])
# Save the latent vector for later use
np.save(os.path.join(result_subdir,'latents_vector.npy'), latents)
# Classification not requested: use graded beauty-rate conditioning vectors
else:
min_beauty_level = 1.0 / args.beauty_levels
std = min_beauty_level / 2.0 - (min_beauty_level / 10.0)
for i in range(args.beauty_levels):
# Initialise the beauty-rates label
if args.total_agreement:
labels = np.ones(args.labels_size)
labels = labels * (min_beauty_level * (i + 1))
labels = np.expand_dims(labels, axis=0)
else:
labels = np.random.normal(min_beauty_level*(i+1), std, [1, args.labels_size])
labels = np.clip(labels, 0.0, 1.0)
# Run the generator on the conditioned noise to obtain an image
image = Gs.run(latents, labels, minibatch_size=1, num_gpus=1, out_mul=127.5, out_add=127.5, out_shrink=1, out_dtype=np.uint8)
# Save the generated image as '<j>_<i>.png'
misc.save_image_grid(image, os.path.join(result_subdir, '{}_{}.png'.format('%04d' % j,i)), [0,255], [1,1])
# Save the latent vector for later use
np.save(os.path.join(result_subdir,'latents_vector.npy'), latents)
if j % 10 == 0:
print("saved {}/{} images".format(j,args.outputs))
| 4,341 | 46.195652 | 160 | py |
Beholder-GAN | Beholder-GAN-master/beautify_image.py | import os
import misc
import numpy as np
import pdb
from config import EasyDict
import tfutil
import argparse
import csv
import tensorflow as tf
import tensorflow_hub as hub
import PIL
from PIL import Image
import matplotlib.pyplot as plt
# initialize parser arguments
parser = argparse.ArgumentParser()
parser.add_argument('--results_dir', '-results_dir', help='name of training experiment folder', default='dean_cond_batch16', type=str)
parser.add_argument('--labels_size', '-labels_size', help='size of labels vector', default=60, type=int)
parser.add_argument('--iters', '-iters', help='number of optimisation iterations', default=100000, type=int)
parser.add_argument('--lr', '-lr', help='learning rate of algorithm', default=0.1, type=float)
parser.add_argument('--alpha', '-alpha', help='weight of normal loss in relation to vgg loss', default=0.7, type=float)
parser.add_argument('--gpu', '-gpu', help='gpu index for the algorithm to run on', default='0', type=str)
parser.add_argument('--image_path', '-image_path', help='full path to image', default='../datasets/CelebA-HQ/img/03134.png', type=str)
parser.add_argument('--resolution', '-resolution', help='resolution of the generated image', default=256, type=int)
args = parser.parse_args()
# manual parameters
result_subdir = misc.create_result_subdir('results', 'inference_test')
misc.init_output_logging()
# initialize TensorFlow
print('Initializing TensorFlow...')
env = EasyDict() # Environment variables, set by the main program in train.py.
env.TF_CPP_MIN_LOG_LEVEL = '1' # Print warnings and errors, but disable debug info.
env.CUDA_VISIBLE_DEVICES = args.gpu # Unspecified (default) = Use all available GPUs. List of ints = CUDA device numbers to use. change to '0' if first GPU is better
os.environ.update(env)
tf_config = EasyDict() # TensorFlow session config, set by tfutil.init_tf().
tf_config['graph_options.place_pruned_graph'] = True # False (default) = Check that all ops are available on the designated device.
tf_config['gpu_options.allow_growth'] = True
tfutil.init_tf(tf_config)
# load network
network_pkl = misc.locate_network_pkl(args.results_dir)
print('Loading network from "%s"...' % network_pkl)
G, D, Gs = misc.load_network_pkl(args.results_dir, None)
# Initialise random inputs
latents = misc.random_latents(1, Gs, random_state=np.random.RandomState(800))
labels = np.random.rand(1, args.labels_size)
# Load the image and convert it to an input tensor
img = PIL.Image.open(args.image_path)
img = img.resize((args.resolution,args.resolution), Image.ANTIALIAS)
img.save((args.image_path).split('/')[-1]) # save image for debug purposes
img = np.asarray(img)
img = img.transpose(2, 0, 1)
img = np.expand_dims(img, axis=0)
img = (img / 127.5) - 1.0 # normalization
# execute algorithm
history = Gs.reverse_gan_for_etalons(latents, labels, img, results_dir=args.results_dir, dest_dir=result_subdir, iters=args.iters, learning_rate=args.lr, alpha=args.alpha)
# save history of latents
with open(result_subdir+'/history_of_latents.txt', 'w') as f:
for item in history:
f.write("{}\n".format(item))
f.write("\n")
| 3,112 | 44.115942 | 171 | py |
Beholder-GAN | Beholder-GAN-master/loss.py | #Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Creative Commons Attribution-NonCommercial 4.0
# International Public License (CC BY-NC 4.0). The full license text is
# reproduced verbatim at the top of legacy.py above and at
# creativecommons.org/licenses/by-nc/4.0/legalcode.
import numpy as np
import tensorflow as tf
import tfutil
#----------------------------------------------------------------------------
# Convenience func that casts all of its arguments to tf.float32.
def fp32(*values):
if len(values) == 1 and isinstance(values[0], tuple):
values = values[0]
values = tuple(tf.cast(v, tf.float32) for v in values)
return values if len(values) >= 2 else values[0]
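# Example (illustrative): fp32(a) returns the single cast tensor itself,
# while fp32(a, b) -- or equivalently fp32((a, b)) -- returns a tuple.
# This is what lets the loss functions below unpack
# `scores, labels = fp32(D.get_output_for(...))` in one step.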
#----------------------------------------------------------------------------
# Generator loss function used in the paper (WGAN + AC-GAN).
def G_wgan_acgan(G, D, opt, training_set, minibatch_size,
cond_weight = 1.0): # Weight of the conditioning term.
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
labels = training_set.get_random_labels_tf(minibatch_size)
fake_images_out = G.get_output_for(latents, labels, is_training=True)
fake_scores_out, fake_labels_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
loss = -fake_scores_out
"""
if D.output_shapes[1][1] > 0:
with tf.name_scope('LabelPenalty'):
label_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=fake_labels_out)
#label_penalty_fakes = tf.losses.mean_squared_error(labels=labels, predictions=fake_labels_out)
loss += label_penalty_fakes * cond_weight
"""
return loss
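#
# As written, this is the plain WGAN generator objective
#     L_G = -E_z[ D(G(z)) ],
# with the AC-GAN conditioning term left disabled in the string-commented
# block above.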
#----------------------------------------------------------------------------
# Discriminator loss function used in the paper (WGAN-GP + AC-GAN).
def D_wgangp_acgan(G, D, opt, training_set, minibatch_size, reals, labels,
wgan_lambda = 10.0, # Weight for the gradient penalty term.
wgan_epsilon = 0.001, # Weight for the epsilon term, \epsilon_{drift}.
wgan_target = 1.0, # Target value for gradient magnitudes.
cond_weight = 1.0): # Weight of the conditioning terms.
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
fake_images_out = G.get_output_for(latents, labels, is_training=True)
real_scores_out, real_labels_out = fp32(D.get_output_for(reals, labels, is_training=True))
fake_scores_out, fake_labels_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
real_scores_out = tfutil.autosummary('Loss/real_scores', real_scores_out)
fake_scores_out = tfutil.autosummary('Loss/fake_scores', fake_scores_out)
loss = fake_scores_out - real_scores_out
with tf.name_scope('GradientPenalty'):
mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype)
mixing_factors_labels = tf.random_uniform([minibatch_size, 1], 0.0, 1.0, dtype=fake_images_out.dtype)
mixed_images_out = tfutil.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors)
mixed_labels = tfutil.lerp(labels, labels, mixing_factors_labels)
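        # NOTE: lerp(a, a, t) == a for any t, so as written this line is a
        # no-op and mixed_labels simply equals `labels`; the per-label
        # mixing factors above currently have no effect.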
mixed_scores_out, mixed_labels_out = fp32(D.get_output_for(mixed_images_out, mixed_labels, is_training=True))
mixed_scores_out = tfutil.autosummary('Loss/mixed_scores', mixed_scores_out)
mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3]))
mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms)
gradient_penalty = tf.square(mixed_norms - wgan_target)
loss += gradient_penalty * (wgan_lambda / (wgan_target**2))
with tf.name_scope('EpsilonPenalty'):
epsilon_penalty = tfutil.autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
loss += epsilon_penalty * wgan_epsilon
"""
if D.output_shapes[1][1] > 0:
with tf.name_scope('LabelPenalty'):
label_penalty_reals = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=real_labels_out)
label_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=fake_labels_out)
#label_penalty_reals = tf.losses.mean_squared_error(labels=labels, predictions=real_labels_out)
#label_penalty_fakes = tf.losses.mean_squared_error(labels=labels, predictions=fake_labels_out)
label_penalty_reals = tfutil.autosummary('Loss/label_penalty_reals', label_penalty_reals)
label_penalty_fakes = tfutil.autosummary('Loss/label_penalty_fakes', label_penalty_fakes)
loss += (label_penalty_reals + label_penalty_fakes) * cond_weight
"""
return loss
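#
# Putting the terms above together, the discriminator objective is
#     L_D = E[D(fake)] - E[D(real)]
#           + (wgan_lambda / wgan_target^2) * E[(||grad D(x_hat)||_2 - wgan_target)^2]
#           + wgan_epsilon * E[D(real)^2]
# where x_hat is a per-sample random interpolation between real and fake
# images, and the last term is the epsilon-drift penalty that keeps the
# real scores from drifting far from zero.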
#----------------------------------------------------------------------------
| 24,514 | 48.325956 | 117 | py |
Beholder-GAN | Beholder-GAN-master/misc.py | #Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# (Creative Commons Attribution-NonCommercial 4.0 International license
# text omitted here; it is a verbatim duplicate of the CC BY-NC 4.0
# license header reproduced in full for the preceding file.)
import os
import sys
import glob
import datetime
import pickle
import re
import numpy as np
from collections import OrderedDict
import scipy.ndimage
import PIL.Image
import config
import dataset
import legacy
#----------------------------------------------------------------------------
# Convenience wrappers for pickle that are able to load data produced by
# older versions of the code.
def load_pkl(filename):
with open(filename, 'rb') as file:
return legacy.LegacyUnpickler(file, encoding='latin1').load()
def save_pkl(obj, filename):
with open(filename, 'wb') as file:
pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL)
#----------------------------------------------------------------------------
# Image utils.
def adjust_dynamic_range(data, drange_in, drange_out):
if drange_in != drange_out:
scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (np.float32(drange_in[1]) - np.float32(drange_in[0]))
bias = (np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale)
data = data * scale + bias
return data
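# Worked example: mapping uint8 images in [0, 255] to [-1, 1] gives
# scale = (1 - (-1)) / (255 - 0) = 2/255 and bias = -1 - 0*scale = -1,
# i.e. adjust_dynamic_range(x, [0, 255], [-1, 1]) == x * (2/255) - 1.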
def create_image_grid(images, grid_size=None):
assert images.ndim == 3 or images.ndim == 4
num, img_w, img_h = images.shape[0], images.shape[-1], images.shape[-2]
if grid_size is not None:
grid_w, grid_h = tuple(grid_size)
else:
grid_w = max(int(np.ceil(np.sqrt(num))), 1)
grid_h = max((num - 1) // grid_w + 1, 1)
grid = np.zeros(list(images.shape[1:-2]) + [grid_h * img_h, grid_w * img_w], dtype=images.dtype)
for idx in range(num):
x = (idx % grid_w) * img_w
y = (idx // grid_w) * img_h
grid[..., y : y + img_h, x : x + img_w] = images[idx]
return grid
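# Example: with num=10 images and no explicit grid_size, grid_w =
# ceil(sqrt(10)) = 4 and grid_h = (10 - 1) // 4 + 1 = 3, i.e. a 4x3 grid
# whose last two cells stay zero-filled.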
def convert_to_pil_image(image, drange=[0,1]):
assert image.ndim == 2 or image.ndim == 3
if image.ndim == 3:
if image.shape[0] == 1:
image = image[0] # grayscale CHW => HW
else:
image = image.transpose(1, 2, 0) # CHW -> HWC
image = adjust_dynamic_range(image, drange, [0,255])
image = np.rint(image).clip(0, 255).astype(np.uint8)
format = 'RGB' if image.ndim == 3 else 'L'
return PIL.Image.fromarray(image, format)
def save_image(image, filename, drange=[0,1], quality=95):
img = convert_to_pil_image(image, drange)
    if filename.lower().endswith(('.jpg', '.jpeg')): # match the extension rather than any '.jpg' substring in the path
img.save(filename,"JPEG", quality=quality, optimize=True)
else:
img.save(filename)
def save_image_grid(images, filename, drange=[0,1], grid_size=None):
convert_to_pil_image(create_image_grid(images, grid_size), drange).save(filename)
#----------------------------------------------------------------------------
# Logging of stdout and stderr to a file.
class OutputLogger(object):
def __init__(self):
self.file = None
self.buffer = ''
def set_log_file(self, filename, mode='wt'):
assert self.file is None
self.file = open(filename, mode)
if self.buffer is not None:
self.file.write(self.buffer)
self.buffer = None
def write(self, data):
if self.file is not None:
self.file.write(data)
if self.buffer is not None:
self.buffer += data
def flush(self):
if self.file is not None:
self.file.flush()
class TeeOutputStream(object):
def __init__(self, child_streams, autoflush=False):
self.child_streams = child_streams
self.autoflush = autoflush
def write(self, data):
for stream in self.child_streams:
stream.write(data)
if self.autoflush:
self.flush()
def flush(self):
for stream in self.child_streams:
stream.flush()
output_logger = None
def init_output_logging():
global output_logger
if output_logger is None:
output_logger = OutputLogger()
sys.stdout = TeeOutputStream([sys.stdout, output_logger], autoflush=True)
sys.stderr = TeeOutputStream([sys.stderr, output_logger], autoflush=True)
def set_output_log_file(filename, mode='wt'):
if output_logger is not None:
output_logger.set_log_file(filename, mode)
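# Minimal usage sketch (assumed, not part of the original code): call
# init_output_logging() once at startup, then set_output_log_file(path)
# once the result directory is known. Output printed before that point is
# buffered by OutputLogger and written into the file retroactively.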
#----------------------------------------------------------------------------
# Reporting results.
def create_result_subdir(result_dir, desc):
# Select run ID and create subdir.
while True:
run_id = 0
for fname in glob.glob(os.path.join(result_dir, '*')):
try:
fbase = os.path.basename(fname)
ford = int(fbase[:fbase.find('-')])
run_id = max(run_id, ford + 1)
except ValueError:
pass
result_subdir = os.path.join(result_dir, '%03d-%s' % (run_id, desc))
try:
os.makedirs(result_subdir)
break
except OSError:
if os.path.isdir(result_subdir):
continue
raise
print("Saving results to", result_subdir)
set_output_log_file(os.path.join(result_subdir, 'log.txt'))
# Export config.
try:
with open(os.path.join(result_subdir, 'config.txt'), 'wt') as fout:
for k, v in sorted(config.__dict__.items()):
if not k.startswith('_'):
fout.write("%s = %s\n" % (k, str(v)))
    except Exception: # avoid a bare except that would also swallow KeyboardInterrupt/SystemExit
pass
return result_subdir
def format_time(seconds):
s = int(np.rint(seconds))
if s < 60: return '%ds' % (s)
elif s < 60*60: return '%dm %02ds' % (s // 60, s % 60)
elif s < 24*60*60: return '%dh %02dm %02ds' % (s // (60*60), (s // 60) % 60, s % 60)
else: return '%dd %02dh %02dm' % (s // (24*60*60), (s // (60*60)) % 24, (s // 60) % 60)
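# Worked examples: format_time(59) -> '59s', format_time(61) -> '1m 01s',
# format_time(3661) -> '1h 01m 01s', format_time(90000) -> '1d 01h 00m'.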
#----------------------------------------------------------------------------
# Locating results.
def locate_result_subdir(run_id_or_result_subdir):
if isinstance(run_id_or_result_subdir, str) and os.path.isdir(run_id_or_result_subdir):
return run_id_or_result_subdir
searchdirs = []
searchdirs += ['']
searchdirs += ['results']
searchdirs += ['networks']
for searchdir in searchdirs:
dir = config.result_dir if searchdir == '' else os.path.join(config.result_dir, searchdir)
dir = os.path.join(dir, str(run_id_or_result_subdir))
if os.path.isdir(dir):
return dir
prefix = '%03d' % run_id_or_result_subdir if isinstance(run_id_or_result_subdir, int) else str(run_id_or_result_subdir)
dirs = sorted(glob.glob(os.path.join(config.result_dir, searchdir, prefix + '-*')))
dirs = [dir for dir in dirs if os.path.isdir(dir)]
if len(dirs) == 1:
return dirs[0]
raise IOError('Cannot locate result subdir for run', run_id_or_result_subdir)
def list_network_pkls(run_id_or_result_subdir, include_final=True):
result_subdir = locate_result_subdir(run_id_or_result_subdir)
pkls = sorted(glob.glob(os.path.join(result_subdir, 'network-*.pkl')))
if len(pkls) >= 1 and os.path.basename(pkls[0]) == 'network-final.pkl':
if include_final:
pkls.append(pkls[0])
del pkls[0]
return pkls
def locate_network_pkl(run_id_or_result_subdir_or_network_pkl, snapshot=None):
if isinstance(run_id_or_result_subdir_or_network_pkl, str) and os.path.isfile(run_id_or_result_subdir_or_network_pkl):
return run_id_or_result_subdir_or_network_pkl
pkls = list_network_pkls(run_id_or_result_subdir_or_network_pkl)
if len(pkls) >= 1 and snapshot is None:
return pkls[-1]
for pkl in pkls:
try:
name = os.path.splitext(os.path.basename(pkl))[0]
number = int(name.split('-')[-1])
if number == snapshot:
return pkl
except ValueError: pass
except IndexError: pass
raise IOError('Cannot locate network pkl for snapshot', snapshot)
def get_id_string_for_network_pkl(network_pkl):
p = network_pkl.replace('.pkl', '').replace('\\', '/').split('/')
return '-'.join(p[max(len(p) - 2, 0):])
#----------------------------------------------------------------------------
# Loading and using trained networks.
def load_network_pkl(run_id_or_result_subdir_or_network_pkl, snapshot=None):
return load_pkl(locate_network_pkl(run_id_or_result_subdir_or_network_pkl, snapshot))
def random_latents(num_latents, G, random_state=None):
if random_state is not None:
return random_state.randn(num_latents, *G.input_shape[1:]).astype(np.float32)
else:
return np.random.randn(num_latents, *G.input_shape[1:]).astype(np.float32)
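# Example (illustrative shapes): if G.input_shape == [None, 512], then
# random_latents(8, G) returns a float32 array of shape (8, 512) sampled
# i.i.d. from a standard normal distribution.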
def load_dataset_for_previous_run(run_id, **kwargs): # => dataset_obj, mirror_augment
result_subdir = locate_result_subdir(run_id)
# Parse config.txt.
parsed_cfg = dict()
with open(os.path.join(result_subdir, 'config.txt'), 'rt') as f:
for line in f:
if line.startswith('dataset =') or line.startswith('train ='):
exec(line, parsed_cfg, parsed_cfg)
dataset_cfg = parsed_cfg.get('dataset', dict())
train_cfg = parsed_cfg.get('train', dict())
mirror_augment = train_cfg.get('mirror_augment', False)
# Handle legacy options.
if 'h5_path' in dataset_cfg:
dataset_cfg['tfrecord_dir'] = dataset_cfg.pop('h5_path').replace('.h5', '')
if 'mirror_augment' in dataset_cfg:
mirror_augment = dataset_cfg.pop('mirror_augment')
if 'max_labels' in dataset_cfg:
v = dataset_cfg.pop('max_labels')
if v is None: v = 0
if v == 'all': v = 'full'
dataset_cfg['max_label_size'] = v
if 'max_images' in dataset_cfg:
dataset_cfg.pop('max_images')
# Handle legacy dataset names.
v = dataset_cfg['tfrecord_dir']
v = v.replace('-32x32', '').replace('-32', '')
v = v.replace('-128x128', '').replace('-128', '')
v = v.replace('-256x256', '').replace('-256', '')
v = v.replace('-1024x1024', '').replace('-1024', '')
v = v.replace('celeba-hq', 'celebahq')
v = v.replace('cifar-10', 'cifar10')
v = v.replace('cifar-100', 'cifar100')
v = v.replace('mnist-rgb', 'mnistrgb')
v = re.sub('lsun-100k-([^-]*)', 'lsun-\\1-100k', v)
v = re.sub('lsun-full-([^-]*)', 'lsun-\\1-full', v)
dataset_cfg['tfrecord_dir'] = v
# Load dataset.
dataset_cfg.update(kwargs)
dataset_obj = dataset.load_dataset(data_dir=config.data_dir, **dataset_cfg)
return dataset_obj, mirror_augment
def apply_mirror_augment(minibatch):
mask = np.random.rand(minibatch.shape[0]) < 0.5
minibatch = np.array(minibatch)
minibatch[mask] = minibatch[mask, :, :, ::-1]
return minibatch
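# Note: minibatches are NCHW here, so reversing the last axis flips each
# selected image left-right; each image is flipped with probability 0.5.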
#----------------------------------------------------------------------------
# Text labels.
_text_label_cache = OrderedDict()
def draw_text_label(img, text, x, y, alignx=0.5, aligny=0.5, color=255, opacity=1.0, glow_opacity=1.0, **kwargs):
color = np.array(color).flatten().astype(np.float32)
    assert img.ndim == 3 and (img.shape[2] == color.size or color.size == 1) # parenthesize: `and` binds tighter than `or`, and the slicing below requires a 3D img
alpha, glow = setup_text_label(text, **kwargs)
xx, yy = int(np.rint(x - alpha.shape[1] * alignx)), int(np.rint(y - alpha.shape[0] * aligny))
xb, yb = max(-xx, 0), max(-yy, 0)
xe, ye = min(alpha.shape[1], img.shape[1] - xx), min(alpha.shape[0], img.shape[0] - yy)
img = np.array(img)
slice = img[yy+yb : yy+ye, xx+xb : xx+xe, :]
slice[:] = slice * (1.0 - (1.0 - (1.0 - alpha[yb:ye, xb:xe]) * (1.0 - glow[yb:ye, xb:xe] * glow_opacity)) * opacity)[:, :, np.newaxis]
slice[:] = slice + alpha[yb:ye, xb:xe, np.newaxis] * (color * opacity)[np.newaxis, np.newaxis, :]
return img
def setup_text_label(text, font='Calibri', fontsize=32, padding=6, glow_size=2.0, glow_coef=3.0, glow_exp=2.0, cache_size=100): # => (alpha, glow)
# Lookup from cache.
key = (text, font, fontsize, padding, glow_size, glow_coef, glow_exp)
if key in _text_label_cache:
value = _text_label_cache[key]
del _text_label_cache[key] # LRU policy
_text_label_cache[key] = value
return value
# Limit cache size.
while len(_text_label_cache) >= cache_size:
_text_label_cache.popitem(last=False)
# Render text.
import moviepy.editor # pip install moviepy
alpha = moviepy.editor.TextClip(text, font=font, fontsize=fontsize).mask.make_frame(0)
alpha = np.pad(alpha, padding, mode='constant', constant_values=0.0)
glow = scipy.ndimage.gaussian_filter(alpha, glow_size)
glow = 1.0 - np.maximum(1.0 - glow * glow_coef, 0.0) ** glow_exp
# Add to cache.
value = (alpha, glow)
_text_label_cache[key] = value
return value
#----------------------------------------------------------------------------
| 32,539 | 42.386667 | 146 | py |
Beholder-GAN | Beholder-GAN-master/dataset.py | #Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# (Creative Commons Attribution-NonCommercial 4.0 International license
# text omitted here; it is a verbatim duplicate of the CC BY-NC 4.0
# license header reproduced in full for the preceding files.)
import os
import glob
import numpy as np
import tensorflow as tf
import tfutil
import csv
#----------------------------------------------------------------------------
# Parse individual image from a tfrecords file.
def parse_tfrecord_tf(record):
features = tf.parse_single_example(record, features={
'shape': tf.FixedLenFeature([3], tf.int64),
'data': tf.FixedLenFeature([], tf.string)})
data = tf.decode_raw(features['data'], tf.uint8)
return tf.reshape(data, features['shape'])
def parse_tfrecord_np(record):
ex = tf.train.Example()
ex.ParseFromString(record)
shape = ex.features.feature['shape'].int64_list.value
data = ex.features.feature['data'].bytes_list.value[0]
    return np.frombuffer(data, np.uint8).reshape(shape) # np.fromstring is deprecated; frombuffer is the supported equivalent here
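# For reference, a record in the format parsed above could be serialized
# with a sketch like the following (hypothetical helper, not part of the
# original codebase; `img` is assumed to be a uint8 array of shape CHW):
def make_tfrecord_example(img):
    return tf.train.Example(features=tf.train.Features(feature={
        'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=list(img.shape))),
        'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img.tobytes()]))}))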
#----------------------------------------------------------------------------
# Dataset class that loads data from tfrecords files.
class TFRecordDataset:
def __init__(self,
tfrecord_dir, # Directory containing a collection of tfrecords files.
resolution = None, # Dataset resolution, None = autodetect.
label_file = None, # Relative path of the labels file, None = autodetect.
max_label_size = 0, # 0 = no labels, 'full' = full labels, <int> = N first label components.
repeat = True, # Repeat dataset indefinitely.
shuffle_mb = 4096, # Shuffle data within specified window (megabytes), 0 = disable shuffling.
prefetch_mb = 2048, # Amount of data to prefetch (megabytes), 0 = disable prefetching.
buffer_mb = 256, # Read buffer size (megabytes).
num_threads = 2): # Number of concurrent threads.
self.tfrecord_dir = tfrecord_dir
self.resolution = None
self.resolution_log2 = None
self.shape = [] # [channel, height, width]
self.dtype = 'uint8'
self.dynamic_range = [0, 255]
self.label_file = label_file
self.label_size = None # [component]
self.label_dtype = None
self._np_labels = None
self._tf_minibatch_in = None
self._tf_labels_var = None
self._tf_labels_dataset = None
self._tf_datasets = dict()
self._tf_iterator = None
self._tf_init_ops = dict()
self._tf_minibatch_np = None
self._cur_minibatch = -1
self._cur_lod = -1
# List tfrecords files and inspect their shapes.
assert os.path.isdir(self.tfrecord_dir)
tfr_files = sorted(glob.glob(os.path.join(self.tfrecord_dir, '*.tfrecords')))
assert len(tfr_files) >= 1
tfr_shapes = []
for tfr_file in tfr_files:
tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
for record in tf.python_io.tf_record_iterator(tfr_file, tfr_opt):
tfr_shapes.append(parse_tfrecord_np(record).shape)
break
# Autodetect label filename.
if self.label_file is None:
guess = sorted(glob.glob(os.path.join(self.tfrecord_dir, '*.labels')))
if len(guess):
self.label_file = guess[0]
elif not os.path.isfile(self.label_file):
guess = os.path.join(self.tfrecord_dir, self.label_file)
if os.path.isfile(guess):
self.label_file = guess
# Determine shape and resolution.
max_shape = max(tfr_shapes, key=lambda shape: np.prod(shape))
self.resolution = resolution if resolution is not None else max_shape[1]
self.resolution_log2 = int(np.log2(self.resolution))
self.shape = [max_shape[0], self.resolution, self.resolution]
tfr_lods = [self.resolution_log2 - int(np.log2(shape[1])) for shape in tfr_shapes]
assert all(shape[0] == max_shape[0] for shape in tfr_shapes)
assert all(shape[1] == shape[2] for shape in tfr_shapes)
assert all(shape[1] == self.resolution // (2**lod) for shape, lod in zip(tfr_shapes, tfr_lods))
assert all(lod in tfr_lods for lod in range(self.resolution_log2 - 1))
# Load labels.
assert max_label_size == 'full' or max_label_size >= 0
self._np_labels = np.zeros([1<<20, 0], dtype=np.float32)
if self.label_file is not None and max_label_size != 0:
self._np_labels = np.load(self.label_file)
assert self._np_labels.ndim == 2
if max_label_size != 'full' and self._np_labels.shape[1] > max_label_size:
self._np_labels = self._np_labels[:, :max_label_size]
self.label_size = self._np_labels.shape[1]
self.label_dtype = self._np_labels.dtype.name
# Build TF expressions.
with tf.name_scope('Dataset'), tf.device('/cpu:0'):
self._tf_minibatch_in = tf.placeholder(tf.int64, name='minibatch_in', shape=[])
tf_labels_init = tf.zeros(self._np_labels.shape, self._np_labels.dtype)
self._tf_labels_var = tf.Variable(tf_labels_init, name='labels_var')
tfutil.set_vars({self._tf_labels_var: self._np_labels})
self._tf_labels_dataset = tf.data.Dataset.from_tensor_slices(self._tf_labels_var)
for tfr_file, tfr_shape, tfr_lod in zip(tfr_files, tfr_shapes, tfr_lods):
if tfr_lod < 0:
continue
dset = tf.data.TFRecordDataset(tfr_file, compression_type='', buffer_size=buffer_mb<<20)
dset = dset.map(parse_tfrecord_tf, num_parallel_calls=num_threads)
dset = tf.data.Dataset.zip((dset, self._tf_labels_dataset))
bytes_per_item = np.prod(tfr_shape) * np.dtype(self.dtype).itemsize
if shuffle_mb > 0:
dset = dset.shuffle(((shuffle_mb << 20) - 1) // bytes_per_item + 1)
if repeat:
dset = dset.repeat()
if prefetch_mb > 0:
dset = dset.prefetch(((prefetch_mb << 20) - 1) // bytes_per_item + 1)
dset = dset.batch(self._tf_minibatch_in)
self._tf_datasets[tfr_lod] = dset
self._tf_iterator = tf.data.Iterator.from_structure(self._tf_datasets[0].output_types, self._tf_datasets[0].output_shapes)
self._tf_init_ops = {lod: self._tf_iterator.make_initializer(dset) for lod, dset in self._tf_datasets.items()}
# Use the given minibatch size and level-of-detail for the data returned by get_minibatch_tf().
def configure(self, minibatch_size, lod=0):
lod = int(np.floor(lod))
assert minibatch_size >= 1 and lod in self._tf_datasets
if self._cur_minibatch != minibatch_size or self._cur_lod != lod:
self._tf_init_ops[lod].run({self._tf_minibatch_in: minibatch_size})
self._cur_minibatch = minibatch_size
self._cur_lod = lod
# Get next minibatch as TensorFlow expressions.
def get_minibatch_tf(self): # => images, labels
images, labels = self._tf_iterator.get_next()
return images, labels
# Get next minibatch as NumPy arrays.
def get_minibatch_np(self, minibatch_size, lod=0): # => images, labels
self.configure(minibatch_size, lod)
if self._tf_minibatch_np is None:
self._tf_minibatch_np = self.get_minibatch_tf()
return tfutil.run(self._tf_minibatch_np)
# Get random labels as TensorFlow expression.
def get_random_labels_tf(self, minibatch_size): # => labels
if self.label_size > 0:
return tf.gather(self._tf_labels_var, tf.random_uniform([minibatch_size], 0, self._np_labels.shape[0], dtype=tf.int32))
else:
return tf.zeros([minibatch_size, 0], self.label_dtype)
# Get random labels as NumPy array.
def get_random_labels_np(self, minibatch_size): # => labels
if self.label_size > 0:
return self._np_labels[np.random.randint(self._np_labels.shape[0], size=[minibatch_size])]
else:
return np.zeros([minibatch_size, 0], self.label_dtype)
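# Example (sketch, not part of the original file): stream NumPy minibatches
# from a TFRecordDataset. The directory name and shapes are hypothetical; a
# default TF session must exist before construction because the class calls
# tfutil.set_vars()/tfutil.run() internally.
def _example_stream_minibatches():
    tf.InteractiveSession()
    dataset = TFRecordDataset('datasets/beauty', max_label_size='full')
    images, labels = dataset.get_minibatch_np(minibatch_size=8, lod=0)
    print(images.shape)  # e.g. (8, 3, 256, 256)
    print(labels.shape)  # (8, label_size)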
#----------------------------------------------------------------------------
# Base class for datasets that are generated on the fly.
class SyntheticDataset:
def __init__(self, resolution=1024, num_channels=3, dtype='uint8', dynamic_range=[0,255], label_size=0, label_dtype='float32'):
self.resolution = resolution
self.resolution_log2 = int(np.log2(resolution))
self.shape = [num_channels, resolution, resolution]
self.dtype = dtype
self.dynamic_range = dynamic_range
self.label_size = label_size
self.label_dtype = label_dtype
self._tf_minibatch_var = None
self._tf_lod_var = None
self._tf_minibatch_np = None
self._tf_labels_np = None
assert self.resolution == 2 ** self.resolution_log2
with tf.name_scope('Dataset'):
self._tf_minibatch_var = tf.Variable(np.int32(0), name='minibatch_var')
self._tf_lod_var = tf.Variable(np.int32(0), name='lod_var')
def configure(self, minibatch_size, lod=0):
lod = int(np.floor(lod))
assert minibatch_size >= 1 and lod >= 0 and lod <= self.resolution_log2
tfutil.set_vars({self._tf_minibatch_var: minibatch_size, self._tf_lod_var: lod})
def get_minibatch_tf(self): # => images, labels
with tf.name_scope('SyntheticDataset'):
shrink = tf.cast(2.0 ** tf.cast(self._tf_lod_var, tf.float32), tf.int32)
shape = [self.shape[0], self.shape[1] // shrink, self.shape[2] // shrink]
images = self._generate_images(self._tf_minibatch_var, self._tf_lod_var, shape)
labels = self._generate_labels(self._tf_minibatch_var)
return images, labels
def get_minibatch_np(self, minibatch_size, lod=0): # => images, labels
self.configure(minibatch_size, lod)
if self._tf_minibatch_np is None:
self._tf_minibatch_np = self.get_minibatch_tf()
return tfutil.run(self._tf_minibatch_np)
def get_random_labels_tf(self, minibatch_size): # => labels
with tf.name_scope('SyntheticDataset'):
return self._generate_labels(minibatch_size)
def get_random_labels_np(self, minibatch_size): # => labels
self.configure(minibatch_size)
if self._tf_labels_np is None:
            self._tf_labels_np = self.get_random_labels_tf(minibatch_size)
return tfutil.run(self._tf_labels_np)
def _generate_images(self, minibatch, lod, shape): # to be overridden by subclasses
return tf.zeros([minibatch] + shape, self.dtype)
def _generate_labels(self, minibatch): # to be overridden by subclasses
return tf.zeros([minibatch, self.label_size], self.label_dtype)
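# Example (sketch, not part of the original file): a minimal subclass that
# serves uniform-noise images, showing the two hooks meant to be overridden.
class _NoiseDataset(SyntheticDataset):
    def _generate_images(self, minibatch, lod, shape):
        # shape is [C, H, W], already shrunk according to the current lod
        return tf.cast(tf.random_uniform([minibatch] + shape, 0, 256), self.dtype)
    def _generate_labels(self, minibatch):
        return tf.zeros([minibatch, self.label_size], self.label_dtype)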
#----------------------------------------------------------------------------
# Helper func for constructing a dataset object using the given options.
def load_dataset(class_name='dataset.TFRecordDataset', data_dir=None, verbose=False, **kwargs):
adjusted_kwargs = dict(kwargs)
if 'tfrecord_dir' in adjusted_kwargs and data_dir is not None:
adjusted_kwargs['tfrecord_dir'] = os.path.join(data_dir, adjusted_kwargs['tfrecord_dir'])
if verbose:
print('Streaming data using %s...' % class_name)
dataset = tfutil.import_obj(class_name)(**adjusted_kwargs)
if verbose:
print('Dataset shape =', np.int32(dataset.shape).tolist())
print('Dynamic range =', dataset.dynamic_range)
print('Label size =', dataset.label_size)
return dataset
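# Example (sketch, not part of the original file): the kwargs are forwarded
# to TFRecordDataset's constructor; the directory names are hypothetical.
def _example_load_dataset():
    return load_dataset(data_dir='datasets', tfrecord_dir='beauty',
                        max_label_size='full', verbose=True)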
#----------------------------------------------------------------------------
| 31,670 | 47.724615 | 134 | py |
Beholder-GAN | Beholder-GAN-master/networks.py | #Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
#Attribution-NonCommercial 4.0 International
#
#[Full text of the CC BY-NC 4.0 license omitted here; it is identical to
#the license header reproduced in dataset.py above.]
#
#Creative Commons may be contacted at creativecommons.org.
import numpy as np
import tensorflow as tf
# NOTE: Do not import any application-specific modules here!
#----------------------------------------------------------------------------
def lerp(a, b, t): return a + (b - a) * t
def lerp_clip(a, b, t): return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0)
def cset(cur_lambda, new_cond, new_lambda): return lambda: tf.cond(new_cond, new_lambda, cur_lambda)
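# For example, lerp(0.0, 10.0, 0.25) == 2.5, while lerp_clip clamps t to
# [0, 1] first, so lerp_clip(0.0, 10.0, 1.7) evaluates to 10.0 (as a TF op).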
#----------------------------------------------------------------------------
# Get/create weight tensor for a convolutional or fully-connected layer.
def get_weight(shape, gain=np.sqrt(2), use_wscale=False, fan_in=None):
if fan_in is None: fan_in = np.prod(shape[:-1])
std = gain / np.sqrt(fan_in) # He init
if use_wscale:
wscale = tf.constant(np.float32(std), name='wscale')
return tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal()) * wscale
else:
return tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal(0, std))
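# Worked example: a 3x3 conv with 512 input fmaps has fan_in = 3*3*512 = 4608,
# so std = sqrt(2)/sqrt(4608) ~= 0.0208. With use_wscale=True the weight is
# drawn from N(0, 1) and scaled by this constant at runtime (the equalized
# learning rate trick); otherwise the weight is initialized with std directly.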
#----------------------------------------------------------------------------
# Fully-connected layer.
def dense(x, fmaps, gain=np.sqrt(2), use_wscale=False):
if len(x.shape) > 2:
x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])])
w = get_weight([x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
w = tf.cast(w, x.dtype)
return tf.matmul(x, w)
#----------------------------------------------------------------------------
# Convolutional layer.
def conv2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False):
assert kernel >= 1 and kernel % 2 == 1
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
w = tf.cast(w, x.dtype)
return tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='SAME', data_format='NCHW')
#----------------------------------------------------------------------------
# Apply bias to the given activation tensor.
def apply_bias(x):
b = tf.get_variable('bias', shape=[x.shape[1]], initializer=tf.initializers.zeros())
b = tf.cast(b, x.dtype)
if len(x.shape) == 2:
return x + b
else:
return x + tf.reshape(b, [1, -1, 1, 1])
#----------------------------------------------------------------------------
# Leaky ReLU activation. Same as tf.nn.leaky_relu, but supports FP16.
def leaky_relu(x, alpha=0.2):
with tf.name_scope('LeakyRelu'):
alpha = tf.constant(alpha, dtype=x.dtype, name='alpha')
return tf.maximum(x * alpha, x)
#----------------------------------------------------------------------------
# Nearest-neighbor upscaling layer.
def upscale2d(x, factor=2):
assert isinstance(factor, int) and factor >= 1
if factor == 1: return x
with tf.variable_scope('Upscale2D'):
s = x.shape
x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
x = tf.tile(x, [1, 1, 1, factor, 1, factor])
x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
return x
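# Shape example: NCHW input [N, C, H, W] becomes [N, C, H*factor, W*factor],
# with every input pixel repeated over a factor x factor block.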
#----------------------------------------------------------------------------
# Fused upscale2d + conv2d.
# Faster and uses less memory than performing the operations separately.
def upscale2d_conv2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False):
assert kernel >= 1 and kernel % 2 == 1
w = get_weight([kernel, kernel, fmaps, x.shape[1].value], gain=gain, use_wscale=use_wscale, fan_in=(kernel**2)*x.shape[1].value)
w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]])
w = tf.cast(w, x.dtype)
os = [tf.shape(x)[0], fmaps, x.shape[2] * 2, x.shape[3] * 2]
return tf.nn.conv2d_transpose(x, w, os, strides=[1,1,2,2], padding='SAME', data_format='NCHW')
#----------------------------------------------------------------------------
# Box filter downscaling layer.
def downscale2d(x, factor=2):
assert isinstance(factor, int) and factor >= 1
if factor == 1: return x
with tf.variable_scope('Downscale2D'):
ksize = [1, 1, factor, factor]
return tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') # NOTE: requires tf_config['graph_options.place_pruned_graph'] = True
#----------------------------------------------------------------------------
# Fused conv2d + downscale2d.
# Faster and uses less memory than performing the operations separately.
def conv2d_downscale2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False):
assert kernel >= 1 and kernel % 2 == 1
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) * 0.25
w = tf.cast(w, x.dtype)
return tf.nn.conv2d(x, w, strides=[1,1,2,2], padding='SAME', data_format='NCHW')
#----------------------------------------------------------------------------
# Pixelwise feature vector normalization.
def pixel_norm(x, epsilon=1e-8):
with tf.variable_scope('PixelNorm'):
return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + epsilon)
#----------------------------------------------------------------------------
# Minibatch standard deviation.
def minibatch_stddev_layer(x, group_size=4):
with tf.variable_scope('MinibatchStddev'):
group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size.
s = x.shape # [NCHW] Input shape.
y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) # [GMCHW] Split minibatch into M groups of size G.
y = tf.cast(y, tf.float32) # [GMCHW] Cast to FP32.
y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMCHW] Subtract mean over group.
y = tf.reduce_mean(tf.square(y), axis=0) # [MCHW] Calc variance over group.
y = tf.sqrt(y + 1e-8) # [MCHW] Calc stddev over group.
y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) # [M111] Take average over fmaps and pixels.
y = tf.cast(y, x.dtype) # [M111] Cast back to original data type.
y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [N1HW] Replicate over group and pixels.
return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap.
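# Reference sketch (not part of the original file): the same statistic in
# plain NumPy for readability; x is [N, C, H, W] with N divisible by the
# effective group size.
def _minibatch_stddev_np(x, group_size=4):
    G = min(group_size, x.shape[0])
    y = x.reshape(G, -1, *x.shape[1:]).astype(np.float32)
    y = np.sqrt(np.square(y - y.mean(axis=0, keepdims=True)).mean(axis=0) + 1e-8)
    y = y.mean(axis=(1, 2, 3), keepdims=True)       # one scalar per group
    y = np.tile(y, (G, 1, x.shape[2], x.shape[3]))  # replicate to [N, 1, H, W]
    return np.concatenate([x, y.astype(x.dtype)], axis=1)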
#----------------------------------------------------------------------------
# Generator network used in the paper.
def G_paper(
latents_in, # First input: Latent vectors [minibatch, latent_size].
labels_in, # Second input: Labels [minibatch, label_size].
num_channels = 1, # Number of output color channels. Overridden based on dataset.
resolution = 32, # Output resolution. Overridden based on dataset.
label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
fmap_base = 8192, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_max = 512, # Maximum number of feature maps in any layer.
latent_size = None, # Dimensionality of the latent vectors. None = min(fmap_base, fmap_max).
normalize_latents = True, # Normalize latent vectors before feeding them to the network?
use_wscale = True, # Enable equalized learning rate?
use_pixelnorm = True, # Enable pixelwise feature vector normalization?
pixelnorm_epsilon = 1e-8, # Constant epsilon for pixelwise feature vector normalization.
use_leakyrelu = True, # True = leaky ReLU, False = ReLU.
dtype = 'float32', # Data type to use for activations and outputs.
fused_scale = True, # True = use fused upscale2d + conv2d, False = separate upscale2d layers.
structure = None, # 'linear' = human-readable, 'recursive' = efficient, None = select automatically.
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
**kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
def PN(x): return pixel_norm(x, epsilon=pixelnorm_epsilon) if use_pixelnorm else x
if latent_size is None: latent_size = nf(0)
if structure is None: structure = 'linear' if is_template_graph else 'recursive'
act = leaky_relu if use_leakyrelu else tf.nn.relu
latents_in.set_shape([None, latent_size])
labels_in.set_shape([None, label_size])
#labels_mean = tf.reduce_mean(labels_in, 1)
#labels_mean = tf.expand_dims(labels_mean, 1)
#labels_mean = tf.multiply(labels_mean, 100.0)
#combo_in = tf.cast(tf.concat([latents_in, labels_mean], axis=1), dtype)
combo_in = tf.cast(tf.concat([latents_in, labels_in], axis=1), dtype)
lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype)
# Building blocks.
def block(x, res): # res = 2..resolution_log2
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
if res == 2: # 4x4
if normalize_latents: x = pixel_norm(x, epsilon=pixelnorm_epsilon)
with tf.variable_scope('Dense'):
x = dense(x, fmaps=nf(res-1)*16, gain=np.sqrt(2)/4, use_wscale=use_wscale) # override gain to match the original Theano implementation
x = tf.reshape(x, [-1, nf(res-1), 4, 4])
x = PN(act(apply_bias(x)))
with tf.variable_scope('Conv'):
x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))))
else: # 8x8 and up
if fused_scale:
with tf.variable_scope('Conv0_up'):
x = PN(act(apply_bias(upscale2d_conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))))
else:
x = upscale2d(x)
with tf.variable_scope('Conv0'):
x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))))
with tf.variable_scope('Conv1'):
x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))))
return x
def torgb(x, res): # res = 2..resolution_log2
lod = resolution_log2 - res
with tf.variable_scope('ToRGB_lod%d' % lod):
return apply_bias(conv2d(x, fmaps=num_channels, kernel=1, gain=1, use_wscale=use_wscale))
# Linear structure: simple but inefficient.
if structure == 'linear':
x = block(combo_in, 2)
images_out = torgb(x, 2)
for res in range(3, resolution_log2 + 1):
lod = resolution_log2 - res
x = block(x, res)
img = torgb(x, res)
images_out = upscale2d(images_out)
with tf.variable_scope('Grow_lod%d' % lod):
images_out = lerp_clip(img, images_out, lod_in - lod)
# Recursive structure: complex but efficient.
if structure == 'recursive':
def grow(x, res, lod):
y = block(x, res)
img = lambda: upscale2d(torgb(y, res), 2**lod)
if res > 2: img = cset(img, (lod_in > lod), lambda: upscale2d(lerp(torgb(y, res), upscale2d(torgb(x, res - 1)), lod_in - lod), 2**lod))
if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1))
return img()
images_out = grow(combo_in, 2, resolution_log2 - 2)
assert images_out.dtype == tf.as_dtype(dtype)
images_out = tf.identity(images_out, name='images_out')
return images_out
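# Example (sketch, not part of the original file): build the generator
# directly with placeholders. In the training pipeline this is done through
# tfutil.Network, which overrides num_channels/resolution/label_size from
# the dataset; the literal sizes below are illustrative only.
def _example_build_G():
    latents = tf.placeholder(tf.float32, [None, 512], name='latents_in')
    labels = tf.placeholder(tf.float32, [None, 60], name='labels_in')
    return G_paper(latents, labels, num_channels=3, resolution=128,
                   label_size=60, latent_size=512, is_template_graph=True)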
#----------------------------------------------------------------------------
# Discriminator network used in the paper.
def D_paper(
images_in, # Input: Images [minibatch, channel, height, width].
labels_in, # Second input: Labels [minibatch, label_size].
num_channels = 1, # Number of input color channels. Overridden based on dataset.
resolution = 32, # Input resolution. Overridden based on dataset.
label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
fmap_base = 8192, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_max = 512, # Maximum number of feature maps in any layer.
use_wscale = True, # Enable equalized learning rate?
mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, 0 = disable.
dtype = 'float32', # Data type to use for activations and outputs.
fused_scale = True, # True = use fused conv2d + downscale2d, False = separate downscale2d layers.
structure = None, # 'linear' = human-readable, 'recursive' = efficient, None = select automatically
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
**kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
if structure is None: structure = 'linear' if is_template_graph else 'recursive'
act = leaky_relu
images_in.set_shape([None, num_channels, resolution, resolution])
labels_in.set_shape([None, label_size])
images_in = tf.cast(images_in, dtype)
lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype)
# Building blocks.
def fromrgb(x, res): # res = 2..resolution_log2
with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)):
return act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=1, use_wscale=use_wscale)))
def block(x, res): # res = 2..resolution_log2
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
if res >= 3: # 8x8 and up
with tf.variable_scope('Conv0'):
x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))
if fused_scale:
with tf.variable_scope('Conv1_down'):
x = act(apply_bias(conv2d_downscale2d(x, fmaps=nf(res-2), kernel=3, use_wscale=use_wscale)))
else:
with tf.variable_scope('Conv1'):
x = act(apply_bias(conv2d(x, fmaps=nf(res-2), kernel=3, use_wscale=use_wscale)))
x = downscale2d(x)
else: # 4x4
if mbstd_group_size > 1:
x = minibatch_stddev_layer(x, mbstd_group_size)
with tf.variable_scope('Conv'):
x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))
with tf.variable_scope('Dense0'):
x = act(apply_bias(dense(x, fmaps=nf(res-2), use_wscale=use_wscale)))
with tf.variable_scope('Dense1'):
x = apply_bias(dense(x, fmaps=1+label_size, gain=1, use_wscale=use_wscale))
return x
    # create a vector of means from the beauty rates vector
    number_of_means = 4 # decide how many values we want at the end
    # split the beauty rates vector into a few vectors; with number_of_means=4 and label_size=60 we get 4 vectors of 15 values
    splited_beauty_rates = tf.split(labels_in, number_of_means*[int(label_size/number_of_means)], 1)
    # calculate the mean of each vector; splited_beauty_rates becomes a list of single-mean tensors
for i in range(number_of_means):
splited_beauty_rates[i] = tf.expand_dims(splited_beauty_rates[i], 1) # (?, label_size/number_of_means) => (?, 1, label_size/number_of_means)
splited_beauty_rates[i] = tf.reduce_mean(splited_beauty_rates[i], 2) # (?, 1, label_size/number_of_means) => (?, 1)
# concatenate all means tensors into one tensor, so it will get a shape of (?, number_of_means)
means_tensor = tf.concat(splited_beauty_rates, 1)
means_tensor = tf.expand_dims(means_tensor, 1) # (?, number_of_means) => (?, 1, number_of_means)
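    # Worked shape trace (assuming label_size=60, number_of_means=4):
    # labels_in (?, 60) -> four (?, 15) -> each (?, 1, 15) -> each (?, 1)
    # -> concat (?, 4) -> means_tensor (?, 1, 4).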
# Linear structure: simple but inefficient.
if structure == 'linear':
img = images_in
# pad the labels to convert shape of (?, 1, number_of_means) to (?, 1, resolution, resolution)
delta_x = int((resolution - number_of_means)/2) # number of zeros to add on sides
delta_y = int(resolution / 2) # number of zeros to add upwards and downwards
pad_matrix = tf.constant([[0,0],[delta_y-1, delta_y], [delta_x, delta_x]], dtype='int32')
means_in = tf.pad(means_tensor, pad_matrix, "CONSTANT") # (?, 1, number_of_means) => (?, resolution, resolution)
means_in = tf.expand_dims(means_in, 1) # (?, resolution, resolution) => (?, 1, resolution, resolution)
# concatenate images to means
input_in = tf.concat([images_in, means_in], axis=1) # final shape: (?, 4, resolution, resolution)
x = fromrgb(input_in, resolution_log2)
for res in range(resolution_log2, 2, -1):
lod = resolution_log2 - res
x = block(x, res)
img = downscale2d(img)
# pad the labels to convert shape of (?, 1, number_of_means) to (?, 1, resolution, resolution)
iter_res = 2 ** (res - 1)
delta_x = int((iter_res - number_of_means)/2) # number of zeros to add on sides
delta_y = int(iter_res / 2) # number of zeros to add upwards and downwards
pad_matrix = tf.constant([[0,0],[delta_y-1, delta_y], [delta_x, delta_x]], dtype='int32')
means_in = tf.pad(means_tensor, pad_matrix, "CONSTANT") # (?, 1, number_of_means) => (?, iter_res, iter_res)
means_in = tf.expand_dims(means_in, 1) # (?, iter_res, iter_res) => (?, 1, iter_res, iter_res)
# concatenate images to means
input_in = tf.concat([img, means_in], axis=1) # final shape: (?, 4, iter_res, iter_res)
y = fromrgb(input_in, res - 1)
with tf.variable_scope('Grow_lod%d' % lod):
x = lerp_clip(x, y, lod_in - lod)
combo_out = block(x, 2)
# Recursive structure: complex but efficient.
if structure == 'recursive':
def grow(res, lod):
# pad the labels to convert shape of (?, 1, number_of_means) to (?, 1, 2**res, 2**res)
delta_x = int((2**res - number_of_means)/2) # number of zeros to add on sides
delta_y = int(2**res / 2) # number of zeros to add upwards and downwards
pad_matrix = tf.constant([[0,0],[delta_y-1, delta_y], [delta_x, delta_x]], dtype='int32')
means_in = tf.pad(means_tensor, pad_matrix, "CONSTANT") # (?, 1, number_of_means) => (?, 2**res, 2**res)
means_in = tf.expand_dims(means_in, 1) # (?, 2**res, 2**res) => (?, 1, 2**res, 2**res)
# concatenate images to means
img_downscaled = downscale2d(images_in, 2**lod)
input_in = tf.concat([img_downscaled, means_in], axis=1) # final shape: (?, 4, 2**res, 2**res)
x = lambda: fromrgb(input_in, res)
if lod > 0: x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1))
x = block(x(), res); y = lambda: x
if res > 2:
# pad the labels to convert shape of (?, 1, number_of_means) to (?, 1, 2**(res-1), 2**(res-1))
delta_x = int((2**(res-1) - number_of_means)/2) # number of zeros to add on sides
delta_y = int(2**(res-1) / 2) # number of zeros to add upwards and downwards
pad_matrix = tf.constant([[0,0],[delta_y-1, delta_y], [delta_x, delta_x]], dtype='int32')
means_in = tf.pad(means_tensor, pad_matrix, "CONSTANT") # (?, 1, number_of_means) => (?, 2**(res-1), 2**(res-1))
means_in = tf.expand_dims(means_in, 1) # (?, 2**(res-1), 2**(res-1)) => (?, 1, 2**(res-1), 2**(res-1))
# concatenate images to means
img_downscaled = downscale2d(images_in, 2**(lod+1))
input_in = tf.concat([img_downscaled, means_in], axis=1) # final shape: (?, 4, 2**(res-1), 2**(res-1))
y = cset(y, (lod_in > lod), lambda: lerp(x, fromrgb(input_in, res - 1), lod_in - lod))
return y()
combo_out = grow(2, resolution_log2 - 2)
assert combo_out.dtype == tf.as_dtype(dtype)
scores_out = tf.identity(combo_out[:, :1], name='scores_out')
labels_out = tf.identity(combo_out[:, 1:], name='labels_out')
return scores_out, labels_out
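# Example (sketch, not part of the original file): build the discriminator
# with placeholder inputs. label_size must be divisible by number_of_means
# (4) for the mean-splitting above; the literal sizes are illustrative only.
def _example_build_D():
    images = tf.placeholder(tf.float32, [None, 3, 128, 128], name='images_in')
    labels = tf.placeholder(tf.float32, [None, 60], name='labels_in')
    return D_paper(images, labels, num_channels=3, resolution=128,
                   label_size=60, is_template_graph=True)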
#----------------------------------------------------------------------------
| 41,442 | 51.261034 | 167 | py |
Beholder-GAN | Beholder-GAN-master/config.py | #Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
#Attribution-NonCommercial 4.0 International
#
#[Full text of the CC BY-NC 4.0 license omitted here; it is identical to
#the license header reproduced in dataset.py above.]
#
#Creative Commons may be contacted at creativecommons.org.
#will be considered the "Licensor." The text of the Creative Commons
#public licenses is dedicated to the public domain under the CC0 Public
#Domain Dedication. Except for the limited purpose of indicating that
#material is shared under a Creative Commons public license or as
#otherwise permitted by the Creative Commons policies published at
#creativecommons.org/policies, Creative Commons does not authorize the
#use of the trademark "Creative Commons" or any other trademark or logo
#of Creative Commons without its prior written consent including,
#without limitation, in connection with any unauthorized modifications
#to any of its public licenses or any other arrangements,
#understandings, or agreements concerning use of licensed material. For
#the avoidance of doubt, this paragraph does not form part of the
#public licenses.
#
#Creative Commons may be contacted at creativecommons.org.
#----------------------------------------------------------------------------
# Convenience class that behaves exactly like dict(), but allows accessing
# the keys and values using the attribute syntax, i.e., "mydict.key = value".
class EasyDict(dict):
    def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs)
    def __getattr__(self, name):
        try: return self[name]
        except KeyError: raise AttributeError(name)  # hasattr() and copy() expect AttributeError, not KeyError
    def __setattr__(self, name, value): self[name] = value
    def __delattr__(self, name): del self[name]
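# A purely illustrative example (not part of the original config) of the
# attribute syntax used throughout this file:
#
#   opts = EasyDict(func='train.train_progressive_gan')
#   opts.total_kimg = 12000            # same as opts['total_kimg'] = 12000
#   assert opts['func'] == opts.func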
#----------------------------------------------------------------------------
# Paths.
data_dir = '../datasets'
result_dir = 'results'
#----------------------------------------------------------------------------
# TensorFlow options.
tf_config = EasyDict() # TensorFlow session config, set by tfutil.init_tf().
env = EasyDict() # Environment variables, set by the main program in train.py.
tf_config['graph_options.place_pruned_graph'] = True # False (default) = Check that all ops are available on the designated device. True = Skip the check for ops that are not used.
#tf_config['gpu_options.allow_growth'] = False # False (default) = Allocate all GPU memory at the beginning. True = Allocate only as much GPU memory as needed.
env.CUDA_VISIBLE_DEVICES = '0,1' # Unspecified (default) = Use all available GPUs. List of ints = CUDA device numbers to use.
env.TF_CPP_MIN_LOG_LEVEL = '1' # 0 (default) = Print all available debug info from TensorFlow. 1 = Print warnings and errors, but disable debug info.
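# How these two dicts are consumed is assumed here (mirroring the upstream
# ProGAN entry point); the actual wiring lives in train.py / tfutil.py:
#
#   import os; os.environ.update(env)          # e.g. CUDA_VISIBLE_DEVICES
#   import tfutil; tfutil.init_tf(tf_config)   # applies the session options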
#----------------------------------------------------------------------------
# Official training configs, targeted mainly for CelebA-HQ.
# To run, comment/uncomment the lines as appropriate and launch train.py.
desc = 'pgan' # Description string included in result subdir name.
random_seed = 1000 # Global random seed.
dataset = EasyDict() # Options for dataset.load_dataset().
train = EasyDict(func='train.train_progressive_gan') # Options for main training func.
G = EasyDict(func='networks.G_paper') # Options for generator network.
D = EasyDict(func='networks.D_paper') # Options for discriminator network.
G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for generator optimizer.
D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for discriminator optimizer.
G_loss = EasyDict(func='loss.G_wgan_acgan') # Options for generator loss.
D_loss = EasyDict(func='loss.D_wgangp_acgan') # Options for discriminator loss.
sched = EasyDict() # Options for train.TrainingSchedule.
grid = EasyDict(size='1080p', layout='random') # Options for train.setup_snapshot_image_grid().
# Dataset (choose one).
#desc += '-celebahq'; dataset = EasyDict(tfrecord_dir='CelebA-HQ/tf_files'); train.mirror_augment = True
desc += '-beautydataset'; dataset = EasyDict(tfrecord_dir='beauty_dataset/tf_files'); train.mirror_augment = True
#desc += '-beautydataset_asian'; dataset = EasyDict(tfrecord_dir='beauty_dataset/only_asian/tf_files'); train.mirror_augment = True
#desc += '-beautydataset_cm'; dataset = EasyDict(tfrecord_dir='beauty_dataset/splited/caucasian_male/tf_files'); train.mirror_augment = True
#desc += '-beautydataset_cf'; dataset = EasyDict(tfrecord_dir='beauty_dataset/splited/caucasian_female/tf_files'); train.mirror_augment = True
#desc += '-beautydataset_am'; dataset = EasyDict(tfrecord_dir='beauty_dataset/splited/asian_male/tf_files'); train.mirror_augment = True
#desc += '-beautydataset_af'; dataset = EasyDict(tfrecord_dir='beauty_dataset/splited/asian_female/tf_files'); train.mirror_augment = True
#desc += '-400faces'; dataset = EasyDict(tfrecord_dir='400faces/tf_files'); train.mirror_augment = True
#desc += '-celeba'; dataset = EasyDict(tfrecord_dir='celeba'); train.mirror_augment = True
#desc += '-cifar10'; dataset = EasyDict(tfrecord_dir='cifar10')
#desc += '-cifar100'; dataset = EasyDict(tfrecord_dir='cifar100')
#desc += '-svhn'; dataset = EasyDict(tfrecord_dir='svhn')
#desc += '-mnist'; dataset = EasyDict(tfrecord_dir='mnist')
#desc += '-mnistrgb'; dataset = EasyDict(tfrecord_dir='mnistrgb')
#desc += '-syn1024rgb'; dataset = EasyDict(class_name='dataset.SyntheticDataset', resolution=1024, num_channels=3)
#desc += '-lsun-airplane'; dataset = EasyDict(tfrecord_dir='lsun-airplane-100k'); train.mirror_augment = True
#desc += '-lsun-bedroom'; dataset = EasyDict(tfrecord_dir='lsun-bedroom-100k'); train.mirror_augment = True
#desc += '-lsun-bicycle'; dataset = EasyDict(tfrecord_dir='lsun-bicycle-100k'); train.mirror_augment = True
#desc += '-lsun-bird'; dataset = EasyDict(tfrecord_dir='lsun-bird-100k'); train.mirror_augment = True
#desc += '-lsun-boat'; dataset = EasyDict(tfrecord_dir='lsun-boat-100k'); train.mirror_augment = True
#desc += '-lsun-bottle'; dataset = EasyDict(tfrecord_dir='lsun-bottle-100k'); train.mirror_augment = True
#desc += '-lsun-bridge'; dataset = EasyDict(tfrecord_dir='lsun-bridge-100k'); train.mirror_augment = True
#desc += '-lsun-bus'; dataset = EasyDict(tfrecord_dir='lsun-bus-100k'); train.mirror_augment = True
#desc += '-lsun-car'; dataset = EasyDict(tfrecord_dir='lsun-car-100k'); train.mirror_augment = True
#desc += '-lsun-cat'; dataset = EasyDict(tfrecord_dir='lsun-cat-100k'); train.mirror_augment = True
#desc += '-lsun-chair'; dataset = EasyDict(tfrecord_dir='lsun-chair-100k'); train.mirror_augment = True
#desc += '-lsun-churchoutdoor'; dataset = EasyDict(tfrecord_dir='lsun-churchoutdoor-100k'); train.mirror_augment = True
#desc += '-lsun-classroom'; dataset = EasyDict(tfrecord_dir='lsun-classroom-100k'); train.mirror_augment = True
#desc += '-lsun-conferenceroom'; dataset = EasyDict(tfrecord_dir='lsun-conferenceroom-100k'); train.mirror_augment = True
#desc += '-lsun-cow'; dataset = EasyDict(tfrecord_dir='lsun-cow-100k'); train.mirror_augment = True
#desc += '-lsun-diningroom'; dataset = EasyDict(tfrecord_dir='lsun-diningroom-100k'); train.mirror_augment = True
#desc += '-lsun-diningtable'; dataset = EasyDict(tfrecord_dir='lsun-diningtable-100k'); train.mirror_augment = True
#desc += '-lsun-dog'; dataset = EasyDict(tfrecord_dir='lsun-dog-100k'); train.mirror_augment = True
#desc += '-lsun-horse'; dataset = EasyDict(tfrecord_dir='lsun-horse-100k'); train.mirror_augment = True
#desc += '-lsun-kitchen'; dataset = EasyDict(tfrecord_dir='lsun-kitchen-100k'); train.mirror_augment = True
#desc += '-lsun-livingroom'; dataset = EasyDict(tfrecord_dir='lsun-livingroom-100k'); train.mirror_augment = True
#desc += '-lsun-motorbike'; dataset = EasyDict(tfrecord_dir='lsun-motorbike-100k'); train.mirror_augment = True
#desc += '-lsun-person'; dataset = EasyDict(tfrecord_dir='lsun-person-100k'); train.mirror_augment = True
#desc += '-lsun-pottedplant'; dataset = EasyDict(tfrecord_dir='lsun-pottedplant-100k'); train.mirror_augment = True
#desc += '-lsun-restaurant'; dataset = EasyDict(tfrecord_dir='lsun-restaurant-100k'); train.mirror_augment = True
#desc += '-lsun-sheep'; dataset = EasyDict(tfrecord_dir='lsun-sheep-100k'); train.mirror_augment = True
#desc += '-lsun-sofa'; dataset = EasyDict(tfrecord_dir='lsun-sofa-100k'); train.mirror_augment = True
#desc += '-lsun-tower'; dataset = EasyDict(tfrecord_dir='lsun-tower-100k'); train.mirror_augment = True
#desc += '-lsun-train'; dataset = EasyDict(tfrecord_dir='lsun-train-100k'); train.mirror_augment = True
#desc += '-lsun-tvmonitor'; dataset = EasyDict(tfrecord_dir='lsun-tvmonitor-100k'); train.mirror_augment = True
# Conditioning & snapshot options.
desc += '-cond'; dataset.max_label_size = 'full' # conditioned on full label
#desc += '-cond1'; dataset.max_label_size = 1 # conditioned on first component of the label
#desc += '-g4k'; grid.size = '4k'
#desc += '-grpc'; grid.layout = 'row_per_class'
# Config presets (choose one).
#desc += '-preset-v1-1gpu'; num_gpus = 1; D.mbstd_group_size = 16; sched.minibatch_base = 16; sched.minibatch_dict = {256: 14, 512: 6, 1024: 3}; sched.lod_training_kimg = 800; sched.lod_transition_kimg = 800; train.total_kimg = 19000
#desc += '-preset-v2-1gpu'; num_gpus = 1; sched.minibatch_base = 4; sched.minibatch_dict = {4: 128, 8: 128, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8, 512: 4}; sched.G_lrate_dict = {1024: 0.0015}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
desc += '-preset-v2-2gpus'; num_gpus = 2; sched.minibatch_base = 8; sched.minibatch_dict = {4: 256, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8}; sched.G_lrate_dict = {512: 0.0015, 1024: 0.002}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
#desc += '-preset-v2-4gpus'; num_gpus = 4; sched.minibatch_base = 16; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16}; sched.G_lrate_dict = {256: 0.0015, 512: 0.002, 1024: 0.003}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
#desc += '-preset-v2-8gpus'; num_gpus = 8; sched.minibatch_base = 32; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32}; sched.G_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
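# Reading the presets (semantics assumed from train.TrainingSchedule):
# minibatch_dict maps resolution -> total minibatch size summed over GPUs,
# with minibatch_base as the fallback for unlisted resolutions. Under the
# active 2-GPU preset, the 256x256 stage therefore runs 8 images per step,
# i.e. 4 per GPU (further capped by sched.max_minibatch_per_gpu below).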
# Numerical precision (choose one).
desc += '-fp32'; sched.max_minibatch_per_gpu = {256: 16, 512: 8, 1024: 4}
#desc += '-fp16'; G.dtype = 'float16'; D.dtype = 'float16'; G.pixelnorm_epsilon=1e-4; G_opt.use_loss_scaling = True; D_opt.use_loss_scaling = True; sched.max_minibatch_per_gpu = {512: 16, 1024: 8}
# Disable individual features.
#desc += '-nogrowing'; sched.lod_initial_resolution = 1024; sched.lod_training_kimg = 0; sched.lod_transition_kimg = 0; train.total_kimg = 10000
#desc += '-nopixelnorm'; G.use_pixelnorm = False
#desc += '-nowscale'; G.use_wscale = False; D.use_wscale = False
#desc += '-noleakyrelu'; G.use_leakyrelu = False
#desc += '-nosmoothing'; train.G_smoothing = 0.0
#desc += '-norepeat'; train.minibatch_repeats = 1
#desc += '-noreset'; train.reset_opt_for_new_lod = False
# Special modes.
#desc += '-BENCHMARK'; sched.lod_initial_resolution = 4; sched.lod_training_kimg = 3; sched.lod_transition_kimg = 3; train.total_kimg = (8*2+1)*3; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1000; train.network_snapshot_ticks = 1000
#desc += '-BENCHMARK0'; sched.lod_initial_resolution = 1024; train.total_kimg = 10; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1000; train.network_snapshot_ticks = 1000
#desc += '-VERBOSE'; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1; train.network_snapshot_ticks = 100
#desc += '-GRAPH'; train.save_tf_graph = True
#desc += '-HIST'; train.save_weight_histograms = True
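# With the lines left uncommented above, the description string resolves to:
#
#   desc == 'pgan-beautydataset-cond-preset-v2-2gpus-fp32'
#
# which becomes part of the result subdir name under result_dir.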
#----------------------------------------------------------------------------
# Utility scripts.
# To run, uncomment the appropriate line and launch train.py.
#train = EasyDict(func='util_scripts.generate_fake_images', run_id=23, num_pngs=1000); num_gpus = 1; desc = 'fake-images-' + str(train.run_id)
#train = EasyDict(func='util_scripts.generate_fake_images', run_id=23, grid_size=[15,8], num_pngs=10, image_shrink=4); num_gpus = 1; desc = 'fake-grids-' + str(train.run_id)
#train = EasyDict(func='util_scripts.generate_interpolation_video', run_id=23, grid_size=[1,1], duration_sec=60.0, smoothing_sec=1.0); num_gpus = 1; desc = 'interpolation-video-' + str(train.run_id)
#train = EasyDict(func='util_scripts.generate_training_video', run_id=23, duration_sec=20.0); num_gpus = 1; desc = 'training-video-' + str(train.run_id)
#train = EasyDict(func='util_scripts.evaluate_metrics', run_id=23, log='metric-swd-16k.txt', metrics=['swd'], num_images=16384, real_passes=2); num_gpus = 1; desc = train.log.split('.')[0] + '-' + str(train.run_id)
#train = EasyDict(func='util_scripts.evaluate_metrics', run_id=23, log='metric-fid-10k.txt', metrics=['fid'], num_images=10000, real_passes=1); num_gpus = 1; desc = train.log.split('.')[0] + '-' + str(train.run_id)
#train = EasyDict(func='util_scripts.evaluate_metrics', run_id=23, log='metric-fid-50k.txt', metrics=['fid'], num_images=50000, real_passes=1); num_gpus = 1; desc = train.log.split('.')[0] + '-' + str(train.run_id)
#train = EasyDict(func='util_scripts.evaluate_metrics', run_id=23, log='metric-is-50k.txt', metrics=['is'], num_images=50000, real_passes=1); num_gpus = 1; desc = train.log.split('.')[0] + '-' + str(train.run_id)
#train = EasyDict(func='util_scripts.evaluate_metrics', run_id=23, log='metric-msssim-20k.txt', metrics=['msssim'], num_images=20000, real_passes=1); num_gpus = 1; desc = train.log.split('.')[0] + '-' + str(train.run_id)
#----------------------------------------------------------------------------
| 33,334 | 59.389493 | 284 | py |
Beholder-GAN | Beholder-GAN-master/dataset_tool.py | #Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
#Licensed under the Creative Commons Attribution-NonCommercial 4.0
#International Public License; the full license text is reproduced verbatim
#in the config.py header above and is available at creativecommons.org.
import os
import sys
import glob
import argparse
import threading
import six.moves.queue as Queue
import traceback
import numpy as np
import tensorflow as tf
import PIL.Image
import csv
import pdb
import tfutil
import dataset
#----------------------------------------------------------------------------
def error(msg):
    print('Error: ' + msg)
    sys.exit(1)  # sys.exit instead of the site-provided exit(); also works without the site module
#----------------------------------------------------------------------------
class TFRecordExporter:
def __init__(self, tfrecord_dir, expected_images, print_progress=True, progress_interval=10):
self.tfrecord_dir = tfrecord_dir
self.tfr_prefix = os.path.join(self.tfrecord_dir, os.path.basename(self.tfrecord_dir))
self.expected_images = expected_images
self.cur_images = 0
self.shape = None
self.resolution_log2 = None
self.tfr_writers = []
self.print_progress = print_progress
self.progress_interval = progress_interval
if self.print_progress:
print('Creating dataset "%s"' % tfrecord_dir)
if not os.path.isdir(self.tfrecord_dir):
os.makedirs(self.tfrecord_dir)
assert(os.path.isdir(self.tfrecord_dir))
def close(self):
if self.print_progress:
print('%-40s\r' % 'Flushing data...', end='', flush=True)
for tfr_writer in self.tfr_writers:
tfr_writer.close()
self.tfr_writers = []
if self.print_progress:
print('%-40s\r' % '', end='', flush=True)
print('Added %d images.' % self.cur_images)
def choose_shuffled_order(self): # Note: Images and labels must be added in shuffled order.
order = np.arange(self.expected_images)
np.random.RandomState(123).shuffle(order)
return order
def add_image(self, img):
if self.print_progress and self.cur_images % self.progress_interval == 0:
print('%d / %d\r' % (self.cur_images, self.expected_images), end='', flush=True)
if self.shape is None:
self.shape = img.shape
self.resolution_log2 = int(np.log2(self.shape[1]))
assert self.shape[0] in [1, 3]
assert self.shape[1] == self.shape[2]
assert self.shape[1] == 2**self.resolution_log2
tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
            for lod in range(self.resolution_log2 - 1):  # one tfrecords file per LOD, from full resolution down to 4x4
tfr_file = self.tfr_prefix + '-r%02d.tfrecords' % (self.resolution_log2 - lod)
self.tfr_writers.append(tf.python_io.TFRecordWriter(tfr_file, tfr_opt))
assert img.shape == self.shape
        for lod, tfr_writer in enumerate(self.tfr_writers):
            if lod:
                img = img.astype(np.float32)
                img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] + img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25  # 2x2 average pooling halves the resolution for the next LOD
quant = np.rint(img).clip(0, 255).astype(np.uint8)
ex = tf.train.Example(features=tf.train.Features(feature={
'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=quant.shape)),
'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tostring()]))}))
tfr_writer.write(ex.SerializeToString())
self.cur_images += 1
def add_labels(self, labels):
if self.print_progress:
print('%-40s\r' % 'Saving labels...', end='', flush=True)
assert labels.shape[0] == self.cur_images
with open(self.tfr_prefix + '-rxx.labels', 'wb') as f:
np.save(f, labels.astype(np.float32))
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
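# Minimal usage sketch of TFRecordExporter (illustrative only; the create_*
# functions below are the real entry points). Images are uint8 CHW arrays at
# a square power-of-two resolution, added in shuffled order; each image is
# also written 2x2-average-pooled into one .tfrecords file per LOD:
#
#   with TFRecordExporter('../datasets/toy', expected_images=4) as tfr:
#       order = tfr.choose_shuffled_order()
#       imgs = np.random.randint(0, 256, size=(4, 3, 32, 32), dtype=np.uint8)
#       for idx in order:
#           tfr.add_image(imgs[idx])
#       tfr.add_labels(np.eye(4, dtype=np.float32)[order])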
#----------------------------------------------------------------------------
class ExceptionInfo(object):
def __init__(self):
self.value = sys.exc_info()[1]
self.traceback = traceback.format_exc()
#----------------------------------------------------------------------------
class WorkerThread(threading.Thread):
def __init__(self, task_queue):
threading.Thread.__init__(self)
self.task_queue = task_queue
def run(self):
while True:
func, args, result_queue = self.task_queue.get()
if func is None:
break
            try:
                result = func(*args)
            except Exception:  # don't swallow KeyboardInterrupt/SystemExit; errors are re-raised in get_result()
                result = ExceptionInfo()
result_queue.put((result, args))
#----------------------------------------------------------------------------
class ThreadPool(object):
def __init__(self, num_threads):
assert num_threads >= 1
self.task_queue = Queue.Queue()
self.result_queues = dict()
self.num_threads = num_threads
for idx in range(self.num_threads):
thread = WorkerThread(self.task_queue)
thread.daemon = True
thread.start()
def add_task(self, func, args=()):
assert hasattr(func, '__call__') # must be a function
if func not in self.result_queues:
self.result_queues[func] = Queue.Queue()
self.task_queue.put((func, args, self.result_queues[func]))
def get_result(self, func): # returns (result, args)
result, args = self.result_queues[func].get()
if isinstance(result, ExceptionInfo):
print('\n\nWorker thread caught an exception:\n' + result.traceback)
raise result.value
return result, args
def finish(self):
for idx in range(self.num_threads):
self.task_queue.put((None, (), None))
def __enter__(self): # for 'with' statement
return self
def __exit__(self, *excinfo):
self.finish()
def process_items_concurrently(self, item_iterator, process_func=lambda x: x, pre_func=lambda x: x, post_func=lambda x: x, max_items_in_flight=None):
if max_items_in_flight is None: max_items_in_flight = self.num_threads * 4
assert max_items_in_flight >= 1
results = []
retire_idx = [0]
def task_func(prepared, idx):
return process_func(prepared)
def retire_result():
processed, (prepared, idx) = self.get_result(task_func)
results[idx] = processed
while retire_idx[0] < len(results) and results[retire_idx[0]] is not None:
yield post_func(results[retire_idx[0]])
results[retire_idx[0]] = None
retire_idx[0] += 1
for idx, item in enumerate(item_iterator):
prepared = pre_func(item)
results.append(None)
self.add_task(func=task_func, args=(prepared, idx))
while retire_idx[0] < idx - max_items_in_flight + 2:
for res in retire_result(): yield res
while retire_idx[0] < len(results):
for res in retire_result(): yield res
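# Sketch of ThreadPool.process_items_concurrently (illustrative; results are
# retired strictly in input order regardless of which worker finishes first):
#
#   with ThreadPool(num_threads=4) as pool:
#       squares = list(pool.process_items_concurrently(
#           range(8), process_func=lambda x: x * x))
#   # squares == [0, 1, 4, 9, 16, 25, 36, 49]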
#----------------------------------------------------------------------------
def display(tfrecord_dir):
print('Loading dataset "%s"' % tfrecord_dir)
tfutil.init_tf({'gpu_options.allow_growth': True})
dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size='full', repeat=False, shuffle_mb=0)
tfutil.init_uninited_vars()
idx = 0
while True:
try:
images, labels = dset.get_minibatch_np(1)
except tf.errors.OutOfRangeError:
break
if idx == 0:
print('Displaying images')
import cv2 # pip install opencv-python
cv2.namedWindow('dataset_tool')
print('Press SPACE or ENTER to advance, ESC to exit')
print('\nidx = %-8d\nlabel = %s' % (idx, labels[0].tolist()))
cv2.imshow('dataset_tool', images[0].transpose(1, 2, 0)[:, :, ::-1]) # CHW => HWC, RGB => BGR
idx += 1
if cv2.waitKey() == 27:
break
print('\nDisplayed %d images.' % idx)
#----------------------------------------------------------------------------
def extract(tfrecord_dir, output_dir):
print('Loading dataset "%s"' % tfrecord_dir)
tfutil.init_tf({'gpu_options.allow_growth': True})
dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size=0, repeat=False, shuffle_mb=0)
tfutil.init_uninited_vars()
print('Extracting images to "%s"' % output_dir)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
idx = 0
while True:
if idx % 10 == 0:
print('%d\r' % idx, end='', flush=True)
try:
images, labels = dset.get_minibatch_np(1)
except tf.errors.OutOfRangeError:
break
if images.shape[1] == 1:
img = PIL.Image.fromarray(images[0][0], 'L')
else:
img = PIL.Image.fromarray(images[0].transpose(1, 2, 0), 'RGB')
img.save(os.path.join(output_dir, 'img%08d.png' % idx))
idx += 1
print('Extracted %d images.' % idx)
#----------------------------------------------------------------------------
def compare(tfrecord_dir_a, tfrecord_dir_b, ignore_labels):
max_label_size = 0 if ignore_labels else 'full'
print('Loading dataset "%s"' % tfrecord_dir_a)
tfutil.init_tf({'gpu_options.allow_growth': True})
dset_a = dataset.TFRecordDataset(tfrecord_dir_a, max_label_size=max_label_size, repeat=False, shuffle_mb=0)
print('Loading dataset "%s"' % tfrecord_dir_b)
dset_b = dataset.TFRecordDataset(tfrecord_dir_b, max_label_size=max_label_size, repeat=False, shuffle_mb=0)
tfutil.init_uninited_vars()
print('Comparing datasets')
idx = 0
identical_images = 0
identical_labels = 0
while True:
if idx % 100 == 0:
print('%d\r' % idx, end='', flush=True)
try:
images_a, labels_a = dset_a.get_minibatch_np(1)
except tf.errors.OutOfRangeError:
images_a, labels_a = None, None
try:
images_b, labels_b = dset_b.get_minibatch_np(1)
except tf.errors.OutOfRangeError:
images_b, labels_b = None, None
if images_a is None or images_b is None:
if images_a is not None or images_b is not None:
print('Datasets contain different number of images')
break
if images_a.shape == images_b.shape and np.all(images_a == images_b):
identical_images += 1
else:
print('Image %d is different' % idx)
if labels_a.shape == labels_b.shape and np.all(labels_a == labels_b):
identical_labels += 1
else:
print('Label %d is different' % idx)
idx += 1
print('Identical images: %d / %d' % (identical_images, idx))
if not ignore_labels:
print('Identical labels: %d / %d' % (identical_labels, idx))
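# These inspection helpers can also be called directly (paths here are
# placeholders; any tfrecord_dir produced by the create_* functions works):
#
#   display('../datasets/mnist')
#   extract('../datasets/mnist', 'mnist_png')
#   compare('../datasets/mnist', '../datasets/mnist_v2', ignore_labels=False)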
#----------------------------------------------------------------------------
def create_mnist(tfrecord_dir, mnist_dir):
print('Loading MNIST from "%s"' % mnist_dir)
import gzip
with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file:
images = np.frombuffer(file.read(), np.uint8, offset=16)
with gzip.open(os.path.join(mnist_dir, 'train-labels-idx1-ubyte.gz'), 'rb') as file:
labels = np.frombuffer(file.read(), np.uint8, offset=8)
images = images.reshape(-1, 1, 28, 28)
images = np.pad(images, [(0,0), (0,0), (2,2), (2,2)], 'constant', constant_values=0)
assert images.shape == (60000, 1, 32, 32) and images.dtype == np.uint8
assert labels.shape == (60000,) and labels.dtype == np.uint8
assert np.min(images) == 0 and np.max(images) == 255
assert np.min(labels) == 0 and np.max(labels) == 9
onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
onehot[np.arange(labels.size), labels] = 1.0
with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
order = tfr.choose_shuffled_order()
for idx in range(order.size):
tfr.add_image(images[order[idx]])
tfr.add_labels(onehot[order])
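# The one-hot encoding above, worked on a tiny example: labels [1, 0, 2]
# become onehot [[0,1,0], [1,0,0], [0,0,1]]: np.max(labels)+1 columns, one
# float32 row per image, stored via add_labels in the shuffled image order.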
#----------------------------------------------------------------------------
def create_mnistrgb(tfrecord_dir, mnist_dir, num_images=1000000, random_seed=123):
print('Loading MNIST from "%s"' % mnist_dir)
import gzip
with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file:
images = np.frombuffer(file.read(), np.uint8, offset=16)
images = images.reshape(-1, 28, 28)
images = np.pad(images, [(0,0), (2,2), (2,2)], 'constant', constant_values=0)
assert images.shape == (60000, 32, 32) and images.dtype == np.uint8
assert np.min(images) == 0 and np.max(images) == 255
with TFRecordExporter(tfrecord_dir, num_images) as tfr:
rnd = np.random.RandomState(random_seed)
for idx in range(num_images):
tfr.add_image(images[rnd.randint(images.shape[0], size=3)])
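# Note on the indexing above: selecting size=3 random rows from the
# (60000, 32, 32) stack yields a (3, 32, 32) array, i.e. three independent
# digits stacked as the R, G and B channels of one synthetic color image.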
#----------------------------------------------------------------------------
def create_cifar10(tfrecord_dir, cifar10_dir):
print('Loading CIFAR-10 from "%s"' % cifar10_dir)
import pickle
images = []
labels = []
for batch in range(1, 6):
with open(os.path.join(cifar10_dir, 'data_batch_%d' % batch), 'rb') as file:
data = pickle.load(file, encoding='latin1')
images.append(data['data'].reshape(-1, 3, 32, 32))
labels.append(data['labels'])
images = np.concatenate(images)
labels = np.concatenate(labels)
assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
assert labels.shape == (50000,) and labels.dtype == np.int32
assert np.min(images) == 0 and np.max(images) == 255
assert np.min(labels) == 0 and np.max(labels) == 9
onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
onehot[np.arange(labels.size), labels] = 1.0
with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
order = tfr.choose_shuffled_order()
for idx in range(order.size):
tfr.add_image(images[order[idx]])
tfr.add_labels(onehot[order])
#----------------------------------------------------------------------------
def create_cifar100(tfrecord_dir, cifar100_dir):
print('Loading CIFAR-100 from "%s"' % cifar100_dir)
import pickle
with open(os.path.join(cifar100_dir, 'train'), 'rb') as file:
data = pickle.load(file, encoding='latin1')
images = data['data'].reshape(-1, 3, 32, 32)
labels = np.array(data['fine_labels'])
assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
assert labels.shape == (50000,) and labels.dtype == np.int32
assert np.min(images) == 0 and np.max(images) == 255
assert np.min(labels) == 0 and np.max(labels) == 99
onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
onehot[np.arange(labels.size), labels] = 1.0
with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
order = tfr.choose_shuffled_order()
for idx in range(order.size):
tfr.add_image(images[order[idx]])
tfr.add_labels(onehot[order])
#----------------------------------------------------------------------------
def create_svhn(tfrecord_dir, svhn_dir):
print('Loading SVHN from "%s"' % svhn_dir)
import pickle
images = []
labels = []
for batch in range(1, 4):
with open(os.path.join(svhn_dir, 'train_%d.pkl' % batch), 'rb') as file:
data = pickle.load(file, encoding='latin1')
images.append(data[0])
labels.append(data[1])
images = np.concatenate(images)
labels = np.concatenate(labels)
assert images.shape == (73257, 3, 32, 32) and images.dtype == np.uint8
assert labels.shape == (73257,) and labels.dtype == np.uint8
assert np.min(images) == 0 and np.max(images) == 255
assert np.min(labels) == 0 and np.max(labels) == 9
onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
onehot[np.arange(labels.size), labels] = 1.0
with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
order = tfr.choose_shuffled_order()
for idx in range(order.size):
tfr.add_image(images[order[idx]])
tfr.add_labels(onehot[order])
#----------------------------------------------------------------------------
def create_lsun(tfrecord_dir, lmdb_dir, resolution=256, max_images=None):
print('Loading LSUN dataset from "%s"' % lmdb_dir)
import lmdb # pip install lmdb
import cv2 # pip install opencv-python
import io
with lmdb.open(lmdb_dir, readonly=True).begin(write=False) as txn:
total_images = txn.stat()['entries']
if max_images is None:
max_images = total_images
with TFRecordExporter(tfrecord_dir, max_images) as tfr:
for idx, (key, value) in enumerate(txn.cursor()):
try:
try:
                        img = cv2.imdecode(np.frombuffer(value, dtype=np.uint8), 1)  # frombuffer: np.fromstring is deprecated
if img is None:
raise IOError('cv2.imdecode failed')
img = img[:, :, ::-1] # BGR => RGB
except IOError:
img = np.asarray(PIL.Image.open(io.BytesIO(value)))
crop = np.min(img.shape[:2])
img = img[(img.shape[0] - crop) // 2 : (img.shape[0] + crop) // 2, (img.shape[1] - crop) // 2 : (img.shape[1] + crop) // 2]
img = PIL.Image.fromarray(img, 'RGB')
img = img.resize((resolution, resolution), PIL.Image.ANTIALIAS)
img = np.asarray(img)
img = img.transpose(2, 0, 1) # HWC => CHW
tfr.add_image(img)
                except Exception:  # skip unreadable records, but let KeyboardInterrupt propagate
print(sys.exc_info()[1])
if tfr.cur_images == max_images:
break
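# Center-crop arithmetic above, e.g. for a 480x640 LSUN frame: crop = 480,
# rows keep [0:480] and cols keep [80:560], giving a centered 480x480 square
# that is then resized to `resolution` and transposed to CHW.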
#----------------------------------------------------------------------------
def create_celeba(tfrecord_dir, celeba_dir, cx=89, cy=121):
print('Loading CelebA from "%s"' % celeba_dir)
glob_pattern = os.path.join(celeba_dir, 'img_align_celeba_png', '*.png')
image_filenames = sorted(glob.glob(glob_pattern))
expected_images = 202599
if len(image_filenames) != expected_images:
error('Expected to find %d images' % expected_images)
with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
order = tfr.choose_shuffled_order()
for idx in range(order.size):
img = np.asarray(PIL.Image.open(image_filenames[order[idx]]))
assert img.shape == (218, 178, 3)
img = img[cy - 64 : cy + 64, cx - 64 : cx + 64]
img = img.transpose(2, 0, 1) # HWC => CHW
tfr.add_image(img)
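# The fixed crop above: with cx=89, cy=121 each 218x178 aligned CelebA image
# is sliced to rows 57:185 and cols 25:153, i.e. a 128x128 face-centered
# square (HWC), then transposed to CHW for the exporter.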
#----------------------------------------------------------------------------
def create_celebahq(tfrecord_dir, celeba_dir, delta_dir, num_threads=4, num_tasks=100):
print('Loading CelebA from "%s"' % celeba_dir)
expected_images = 202599
if len(glob.glob(os.path.join(celeba_dir, 'img_celeba', '*.jpg'))) != expected_images:
error('Expected to find %d images' % expected_images)
with open(os.path.join(celeba_dir, 'Anno', 'list_landmarks_celeba.txt'), 'rt') as file:
landmarks = [[float(value) for value in line.split()[1:]] for line in file.readlines()[2:]]
landmarks = np.float32(landmarks).reshape(-1, 5, 2)
print('Loading CelebA-HQ deltas from "%s"' % delta_dir)
import scipy.ndimage
import hashlib
import bz2
import zipfile
import base64
import cryptography.hazmat.primitives.hashes
import cryptography.hazmat.backends
import cryptography.hazmat.primitives.kdf.pbkdf2
import cryptography.fernet
expected_zips = 30
if len(glob.glob(os.path.join(delta_dir, 'delta*.zip'))) != expected_zips:
error('Expected to find %d zips' % expected_zips)
with open(os.path.join(delta_dir, 'image_list.txt'), 'rt') as file:
lines = [line.split() for line in file]
fields = dict()
for idx, field in enumerate(lines[0]):
        field_type = int if field.endswith('idx') else str  # avoid shadowing the builtin type()
        fields[field] = [field_type(line[idx]) for line in lines[1:]]
indices = np.array(fields['idx'])
# Must use pillow version 3.1.1 for everything to work correctly.
if getattr(PIL, 'PILLOW_VERSION', '') != '3.1.1':
error('create_celebahq requires pillow version 3.1.1') # conda install pillow=3.1.1
# Must use libjpeg version 8d for everything to work correctly.
img = np.array(PIL.Image.open(os.path.join(celeba_dir, 'img_celeba', '000001.jpg')))
md5 = hashlib.md5()
md5.update(img.tobytes())
if md5.hexdigest() != '9cad8178d6cb0196b36f7b34bc5eb6d3':
error('create_celebahq requires libjpeg version 8d') # conda install jpeg=8d
def rot90(v):
return np.array([-v[1], v[0]])
def process_func(idx):
# Load original image.
orig_idx = fields['orig_idx'][idx]
orig_file = fields['orig_file'][idx]
orig_path = os.path.join(celeba_dir, 'img_celeba', orig_file)
img = PIL.Image.open(orig_path)
# Choose oriented crop rectangle.
lm = landmarks[orig_idx]
eye_avg = (lm[0] + lm[1]) * 0.5 + 0.5
mouth_avg = (lm[3] + lm[4]) * 0.5 + 0.5
eye_to_eye = lm[1] - lm[0]
eye_to_mouth = mouth_avg - eye_avg
x = eye_to_eye - rot90(eye_to_mouth)
x /= np.hypot(*x)
x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
y = rot90(x)
c = eye_avg + eye_to_mouth * 0.1
quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
zoom = 1024 / (np.hypot(*x) * 2)
# Shrink.
shrink = int(np.floor(0.5 / zoom))
if shrink > 1:
size = (int(np.round(float(img.size[0]) / shrink)), int(np.round(float(img.size[1]) / shrink)))
img = img.resize(size, PIL.Image.ANTIALIAS)
quad /= shrink
zoom *= shrink
# Crop.
border = max(int(np.round(1024 * 0.1 / zoom)), 3)
crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
img = img.crop(crop)
quad -= crop[0:2]
# Simulate super-resolution.
superres = int(np.exp2(np.ceil(np.log2(zoom))))
if superres > 1:
img = img.resize((img.size[0] * superres, img.size[1] * superres), PIL.Image.ANTIALIAS)
quad *= superres
zoom /= superres
# Pad.
pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
if max(pad) > border - 4:
pad = np.maximum(pad, int(np.round(1024 * 0.3 / zoom)))
img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
h, w, _ = img.shape
y, x, _ = np.mgrid[:h, :w, :1]
mask = 1.0 - np.minimum(np.minimum(np.float32(x) / pad[0], np.float32(y) / pad[1]), np.minimum(np.float32(w-1-x) / pad[2], np.float32(h-1-y) / pad[3]))
blur = 1024 * 0.02 / zoom
img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
img = PIL.Image.fromarray(np.uint8(np.clip(np.round(img), 0, 255)), 'RGB')
quad += pad[0:2]
# Transform.
img = img.transform((4096, 4096), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
img = img.resize((1024, 1024), PIL.Image.ANTIALIAS)
img = np.asarray(img).transpose(2, 0, 1)
# Verify MD5.
md5 = hashlib.md5()
md5.update(img.tobytes())
assert md5.hexdigest() == fields['proc_md5'][idx]
# Load delta image and original JPG.
with zipfile.ZipFile(os.path.join(delta_dir, 'deltas%05d.zip' % (idx - idx % 1000)), 'r') as zip:
delta_bytes = zip.read('delta%05d.dat' % idx)
with open(orig_path, 'rb') as file:
orig_bytes = file.read()
# Decrypt delta image, using original JPG data as decryption key.
algorithm = cryptography.hazmat.primitives.hashes.SHA256()
backend = cryptography.hazmat.backends.default_backend()
salt = bytes(orig_file, 'ascii')
kdf = cryptography.hazmat.primitives.kdf.pbkdf2.PBKDF2HMAC(algorithm=algorithm, length=32, salt=salt, iterations=100000, backend=backend)
key = base64.urlsafe_b64encode(kdf.derive(orig_bytes))
delta = np.frombuffer(bz2.decompress(cryptography.fernet.Fernet(key).decrypt(delta_bytes)), dtype=np.uint8).reshape(3, 1024, 1024)
# Apply delta image.
img = img + delta
# Verify MD5.
md5 = hashlib.md5()
md5.update(img.tobytes())
assert md5.hexdigest() == fields['final_md5'][idx]
return img
with TFRecordExporter(tfrecord_dir, indices.size) as tfr:
order = tfr.choose_shuffled_order()
with ThreadPool(num_threads) as pool:
for img in pool.process_items_concurrently(indices[order].tolist(), process_func=process_func, max_items_in_flight=num_tasks):
tfr.add_image(img)
#----------------------------------------------------------------------------
def create_celebahq_cond(tfrecord_dir, celeba_dir, delta_dir, num_threads=4, num_tasks=100, save_images=False):
print('Loading CelebA from "%s"' % celeba_dir)
expected_images = 202599
if len(glob.glob(os.path.join(celeba_dir, 'img_celeba', '*.jpg'))) != expected_images:
error('Expected to find %d images' % expected_images)
with open(os.path.join(celeba_dir, 'Anno', 'list_landmarks_celeba.txt'), 'rt') as file:
landmarks = [[float(value) for value in line.split()[1:]] for line in file.readlines()[2:]]
landmarks = np.float32(landmarks).reshape(-1, 5, 2)
print('Loading CelebA-HQ deltas from "%s"' % delta_dir)
import scipy.ndimage
import hashlib
import bz2
import zipfile
import base64
import cryptography.hazmat.primitives.hashes
import cryptography.hazmat.backends
import cryptography.hazmat.primitives.kdf.pbkdf2
import cryptography.fernet
expected_zips = 30
if len(glob.glob(os.path.join(delta_dir, 'delta*.zip'))) != expected_zips:
error('Expected to find %d zips' % expected_zips)
with open(os.path.join(delta_dir, 'image_list.txt'), 'rt') as file:
lines = [line.split() for line in file]
fields = dict()
for idx, field in enumerate(lines[0]):
type = int if field.endswith('idx') else str
fields[field] = [type(line[idx]) for line in lines[1:]]
indices = np.array(fields['idx'])
# Must use pillow version 3.1.1 for everything to work correctly.
if getattr(PIL, 'PILLOW_VERSION', '') != '3.1.1':
error('create_celebahq requires pillow version 3.1.1') # conda install pillow=3.1.1
# Must use libjpeg version 8d for everything to work correctly.
img = np.array(PIL.Image.open(os.path.join(celeba_dir, 'img_celeba', '000001.jpg')))
md5 = hashlib.md5()
md5.update(img.tobytes())
if md5.hexdigest() != '9cad8178d6cb0196b36f7b34bc5eb6d3':
error('create_celebahq requires libjpeg version 8d') # conda install jpeg=8d
def rot90(v):
return np.array([-v[1], v[0]])
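    # rot90 rotates a 2D vector by 90 degrees: (x, y) -> (-y, x).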
def process_func(idx):
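        # This body repeats the create_celebahq processing pipeline verbatim:
        # align, shrink, crop, pad, transform to 1024x1024, then decrypt and
        # apply the CelebA-HQ delta.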
# Load original image.
orig_idx = fields['orig_idx'][idx]
orig_file = fields['orig_file'][idx]
orig_path = os.path.join(celeba_dir, 'img_celeba', orig_file)
img = PIL.Image.open(orig_path)
# Choose oriented crop rectangle.
lm = landmarks[orig_idx]
eye_avg = (lm[0] + lm[1]) * 0.5 + 0.5
mouth_avg = (lm[3] + lm[4]) * 0.5 + 0.5
eye_to_eye = lm[1] - lm[0]
eye_to_mouth = mouth_avg - eye_avg
x = eye_to_eye - rot90(eye_to_mouth)
x /= np.hypot(*x)
x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
y = rot90(x)
c = eye_avg + eye_to_mouth * 0.1
quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
zoom = 1024 / (np.hypot(*x) * 2)
# Shrink.
shrink = int(np.floor(0.5 / zoom))
if shrink > 1:
size = (int(np.round(float(img.size[0]) / shrink)), int(np.round(float(img.size[1]) / shrink)))
img = img.resize(size, PIL.Image.ANTIALIAS)
quad /= shrink
zoom *= shrink
# Crop.
border = max(int(np.round(1024 * 0.1 / zoom)), 3)
crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
img = img.crop(crop)
quad -= crop[0:2]
# Simulate super-resolution.
superres = int(np.exp2(np.ceil(np.log2(zoom))))
if superres > 1:
img = img.resize((img.size[0] * superres, img.size[1] * superres), PIL.Image.ANTIALIAS)
quad *= superres
zoom /= superres
# Pad.
pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
if max(pad) > border - 4:
pad = np.maximum(pad, int(np.round(1024 * 0.3 / zoom)))
img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
h, w, _ = img.shape
y, x, _ = np.mgrid[:h, :w, :1]
mask = 1.0 - np.minimum(np.minimum(np.float32(x) / pad[0], np.float32(y) / pad[1]), np.minimum(np.float32(w-1-x) / pad[2], np.float32(h-1-y) / pad[3]))
blur = 1024 * 0.02 / zoom
img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
img = PIL.Image.fromarray(np.uint8(np.clip(np.round(img), 0, 255)), 'RGB')
quad += pad[0:2]
# Transform.
img = img.transform((4096, 4096), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
img = img.resize((1024, 1024), PIL.Image.ANTIALIAS)
img = np.asarray(img).transpose(2, 0, 1)
# Verify MD5.
md5 = hashlib.md5()
md5.update(img.tobytes())
assert md5.hexdigest() == fields['proc_md5'][idx]
# Load delta image and original JPG.
with zipfile.ZipFile(os.path.join(delta_dir, 'deltas%05d.zip' % (idx - idx % 1000)), 'r') as zip:
delta_bytes = zip.read('delta%05d.dat' % idx)
with open(orig_path, 'rb') as file:
orig_bytes = file.read()
# Decrypt delta image, using original JPG data as decryption key.
algorithm = cryptography.hazmat.primitives.hashes.SHA256()
backend = cryptography.hazmat.backends.default_backend()
salt = bytes(orig_file, 'ascii')
kdf = cryptography.hazmat.primitives.kdf.pbkdf2.PBKDF2HMAC(algorithm=algorithm, length=32, salt=salt, iterations=100000, backend=backend)
key = base64.urlsafe_b64encode(kdf.derive(orig_bytes))
delta = np.frombuffer(bz2.decompress(cryptography.fernet.Fernet(key).decrypt(delta_bytes)), dtype=np.uint8).reshape(3, 1024, 1024)
# Apply delta image.
img = img + delta
# Verify MD5.
md5 = hashlib.md5()
md5.update(img.tobytes())
assert md5.hexdigest() == fields['final_md5'][idx]
return img
images_dest_dir = os.path.join(os.path.dirname(tfrecord_dir), 'img')
if save_images:
if not os.path.exists(images_dest_dir):
os.makedirs(images_dest_dir)
beauty_rates = load_csv(tfrecord_dir)
    # create a numpy vector of length len(images) holding the mean beauty rates, scaled into [0,10]
beauty_rates_mean = np.mean(beauty_rates, axis=1)*10
# round values into their closest integers
    beauty_rates_mean = (np.rint(beauty_rates_mean)).astype(int)
# create one hot vector and fill it
beauty_rates_one_hot = np.zeros((beauty_rates.shape[0], np.max(beauty_rates_mean) + 1), dtype=np.float32)
beauty_rates_one_hot[np.arange(beauty_rates.shape[0]), beauty_rates_mean] = 1.0
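    # Worked example with hypothetical values: a mean beauty rate of 0.63
    # becomes 6.3 after scaling by 10, rounds to 6, and produces a one-hot row
    # with a 1.0 at index 6.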
with TFRecordExporter(tfrecord_dir, indices.size) as tfr:
order = tfr.choose_shuffled_order()
with ThreadPool(num_threads) as pool:
for i,img in enumerate(pool.process_items_concurrently(indices[order].tolist(), process_func=process_func, max_items_in_flight=num_tasks)):
tfr.add_image(img)
if save_images:
im = np.swapaxes(img, 0, 2)
im = PIL.Image.fromarray(im)
im = im.transpose(PIL.Image.ROTATE_270)
im.save("{}/{}.png".format(images_dest_dir, str(order[i]).zfill(5)))
tfr.add_labels(beauty_rates_one_hot[order])
#----------------------------------------------------------------------------
def create_celebahq_cond_continuous(tfrecord_dir, celeba_dir, delta_dir, num_threads=4, num_tasks=100, save_images=False):
print('Loading CelebA from "%s"' % celeba_dir)
expected_images = 202599
if len(glob.glob(os.path.join(celeba_dir, 'img_celeba', '*.jpg'))) != expected_images:
error('Expected to find %d images' % expected_images)
with open(os.path.join(celeba_dir, 'Anno', 'list_landmarks_celeba.txt'), 'rt') as file:
landmarks = [[float(value) for value in line.split()[1:]] for line in file.readlines()[2:]]
landmarks = np.float32(landmarks).reshape(-1, 5, 2)
print('Loading CelebA-HQ deltas from "%s"' % delta_dir)
import scipy.ndimage
import hashlib
import bz2
import zipfile
import base64
import cryptography.hazmat.primitives.hashes
import cryptography.hazmat.backends
import cryptography.hazmat.primitives.kdf.pbkdf2
import cryptography.fernet
expected_zips = 30
if len(glob.glob(os.path.join(delta_dir, 'delta*.zip'))) != expected_zips:
error('Expected to find %d zips' % expected_zips)
with open(os.path.join(delta_dir, 'image_list.txt'), 'rt') as file:
lines = [line.split() for line in file]
fields = dict()
for idx, field in enumerate(lines[0]):
type = int if field.endswith('idx') else str
fields[field] = [type(line[idx]) for line in lines[1:]]
indices = np.array(fields['idx'])
# Must use pillow version 3.1.1 for everything to work correctly.
if getattr(PIL, 'PILLOW_VERSION', '') != '3.1.1':
error('create_celebahq requires pillow version 3.1.1') # conda install pillow=3.1.1
# Must use libjpeg version 8d for everything to work correctly.
img = np.array(PIL.Image.open(os.path.join(celeba_dir, 'img_celeba', '000001.jpg')))
md5 = hashlib.md5()
md5.update(img.tobytes())
if md5.hexdigest() != '9cad8178d6cb0196b36f7b34bc5eb6d3':
error('create_celebahq requires libjpeg version 8d') # conda install jpeg=8d
def rot90(v):
return np.array([-v[1], v[0]])
def process_func(idx):
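        # Identical to process_func in create_celebahq_cond above.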
# Load original image.
orig_idx = fields['orig_idx'][idx]
orig_file = fields['orig_file'][idx]
orig_path = os.path.join(celeba_dir, 'img_celeba', orig_file)
img = PIL.Image.open(orig_path)
# Choose oriented crop rectangle.
lm = landmarks[orig_idx]
eye_avg = (lm[0] + lm[1]) * 0.5 + 0.5
mouth_avg = (lm[3] + lm[4]) * 0.5 + 0.5
eye_to_eye = lm[1] - lm[0]
eye_to_mouth = mouth_avg - eye_avg
x = eye_to_eye - rot90(eye_to_mouth)
x /= np.hypot(*x)
x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
y = rot90(x)
c = eye_avg + eye_to_mouth * 0.1
quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
zoom = 1024 / (np.hypot(*x) * 2)
# Shrink.
shrink = int(np.floor(0.5 / zoom))
if shrink > 1:
size = (int(np.round(float(img.size[0]) / shrink)), int(np.round(float(img.size[1]) / shrink)))
img = img.resize(size, PIL.Image.ANTIALIAS)
quad /= shrink
zoom *= shrink
# Crop.
border = max(int(np.round(1024 * 0.1 / zoom)), 3)
crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
img = img.crop(crop)
quad -= crop[0:2]
# Simulate super-resolution.
superres = int(np.exp2(np.ceil(np.log2(zoom))))
if superres > 1:
img = img.resize((img.size[0] * superres, img.size[1] * superres), PIL.Image.ANTIALIAS)
quad *= superres
zoom /= superres
# Pad.
pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
if max(pad) > border - 4:
pad = np.maximum(pad, int(np.round(1024 * 0.3 / zoom)))
img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
h, w, _ = img.shape
y, x, _ = np.mgrid[:h, :w, :1]
mask = 1.0 - np.minimum(np.minimum(np.float32(x) / pad[0], np.float32(y) / pad[1]), np.minimum(np.float32(w-1-x) / pad[2], np.float32(h-1-y) / pad[3]))
blur = 1024 * 0.02 / zoom
img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
img = PIL.Image.fromarray(np.uint8(np.clip(np.round(img), 0, 255)), 'RGB')
quad += pad[0:2]
# Transform.
img = img.transform((4096, 4096), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
img = img.resize((1024, 1024), PIL.Image.ANTIALIAS)
img = np.asarray(img).transpose(2, 0, 1)
# Verify MD5.
md5 = hashlib.md5()
md5.update(img.tobytes())
assert md5.hexdigest() == fields['proc_md5'][idx]
# Load delta image and original JPG.
with zipfile.ZipFile(os.path.join(delta_dir, 'deltas%05d.zip' % (idx - idx % 1000)), 'r') as zip:
delta_bytes = zip.read('delta%05d.dat' % idx)
with open(orig_path, 'rb') as file:
orig_bytes = file.read()
# Decrypt delta image, using original JPG data as decryption key.
algorithm = cryptography.hazmat.primitives.hashes.SHA256()
backend = cryptography.hazmat.backends.default_backend()
salt = bytes(orig_file, 'ascii')
kdf = cryptography.hazmat.primitives.kdf.pbkdf2.PBKDF2HMAC(algorithm=algorithm, length=32, salt=salt, iterations=100000, backend=backend)
key = base64.urlsafe_b64encode(kdf.derive(orig_bytes))
delta = np.frombuffer(bz2.decompress(cryptography.fernet.Fernet(key).decrypt(delta_bytes)), dtype=np.uint8).reshape(3, 1024, 1024)
# Apply delta image.
img = img + delta
# Verify MD5.
md5 = hashlib.md5()
md5.update(img.tobytes())
assert md5.hexdigest() == fields['final_md5'][idx]
return img
images_dest_dir = os.path.join(os.path.dirname(tfrecord_dir), 'img')
if save_images:
if not os.path.exists(images_dest_dir):
os.makedirs(images_dest_dir)
beauty_rates = load_csv(tfrecord_dir)
with TFRecordExporter(tfrecord_dir, indices.size) as tfr:
order = tfr.choose_shuffled_order()
with ThreadPool(num_threads) as pool:
for i,img in enumerate(pool.process_items_concurrently(indices[order].tolist(), process_func=process_func, max_items_in_flight=num_tasks)):
tfr.add_image(img)
if save_images:
im = np.swapaxes(img, 0, 2)
im = PIL.Image.fromarray(im)
im = im.transpose(PIL.Image.ROTATE_270)
im.save("{}/{}.png".format(images_dest_dir, str(order[i]).zfill(5)))
tfr.add_labels(beauty_rates[order])
#----------------------------------------------------------------------------
def load_csv(dataset_folder):
# Dictionary to load dataset
# key: image name
# value: list of 60 beauty rates from raters
dataset_dict = {}
# csv will be stored in the parent folder
csv_folder = os.path.dirname(dataset_folder)
# read raters csv file
with open(os.path.join(csv_folder,'All_Ratings.csv'), 'r') as csvfile:
raw_dataset = csv.reader(csvfile, delimiter=',', quotechar='|')
for i, row in enumerate(raw_dataset):
row = ','.join(row)
row = row.split(',')
# create list of rates for each image
if row[1] in dataset_dict:
dataset_dict[row[1]][0].append(float(row[2]))
else:
dataset_dict[row[1]] = [[float(row[2])]]
beauty_rates_list = []
# move dict to lists, convert beauty rates to numpy ranged in [0,1]
keylist = dataset_dict.keys()
for key in sorted(keylist):
beauty_rates_list.append(dataset_dict[key])
    # convert the rate lists to a numpy array of shape [images,1,60], with rates scaled into [0,1]
beauty_rates_np = (np.array(beauty_rates_list, dtype=np.float32) / 5.0)
# change shape from [images,1,60] to [images,60]
beauty_rates_np = np.squeeze(beauty_rates_np, axis=1)
return beauty_rates_np
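# Assumed CSV layout, inferred from the indexing above: each row of
# All_Ratings.csv holds the image file name in column 1 and a single rating in
# [1, 5] in column 2 (0-indexed columns), one row per (rater, image) pair.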
#----------------------------------------------------------------------------
def create_from_images(tfrecord_dir, image_dir, shuffle):
print('Loading images from "%s"' % image_dir)
image_filenames = sorted(glob.glob(os.path.join(image_dir, '*')))
if len(image_filenames) == 0:
error('No input images found')
img = np.asarray(PIL.Image.open(image_filenames[0]))
resolution = img.shape[0]
channels = img.shape[2] if img.ndim == 3 else 1
if img.shape[1] != resolution:
error('Input images must have the same width and height')
if resolution != 2 ** int(np.floor(np.log2(resolution))):
error('Input image resolution must be a power-of-two')
if channels not in [1, 3]:
error('Input images must be stored as RGB or grayscale')
with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
order = tfr.choose_shuffled_order() if shuffle else np.arange(len(image_filenames))
for idx in range(order.size):
img = np.asarray(PIL.Image.open(image_filenames[order[idx]]))
if channels == 1:
img = img[np.newaxis, :, :] # HW => CHW
else:
img = img.transpose(2, 0, 1) # HWC => CHW
tfr.add_image(img)
#----------------------------------------------------------------------------
def create_from_images_cond(tfrecord_dir, image_dir, shuffle):
print('Loading images from "%s"' % image_dir)
image_filenames = sorted(glob.glob(os.path.join(image_dir, '*')))
if len(image_filenames) == 0:
error('No input images found')
img = np.asarray(PIL.Image.open(image_filenames[0]))
resolution = img.shape[0]
channels = img.shape[2] if img.ndim == 3 else 1
if img.shape[1] != resolution:
error('Input images must have the same width and height')
if resolution != 2 ** int(np.floor(np.log2(resolution))):
error('Input image resolution must be a power-of-two')
if channels not in [1, 3]:
error('Input images must be stored as RGB or grayscale')
beauty_rates = load_csv(tfrecord_dir)
    # create a numpy vector of length len(images) holding the mean beauty rates, scaled into [0,10]
beauty_rates_mean = np.mean(beauty_rates, axis=1)*10
# round values into their closest integers
beauty_rates_mean = (np.rint(beauty_rates_mean)).astype(int)
# create one hot vector and fill it
beauty_rates_one_hot = np.zeros((beauty_rates.shape[0], np.max(beauty_rates_mean) + 1), dtype=np.float32)
beauty_rates_one_hot[np.arange(beauty_rates.shape[0]), beauty_rates_mean] = 1.0
with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
order = tfr.choose_shuffled_order() if shuffle else np.arange(len(image_filenames))
for idx in range(order.size):
img = np.asarray(PIL.Image.open(image_filenames[order[idx]]))
if channels == 1:
img = img[np.newaxis, :, :] # HW => CHW
else:
img = img.transpose(2, 0, 1) # HWC => CHW
tfr.add_image(img)
tfr.add_labels(beauty_rates_one_hot[order])
#----------------------------------------------------------------------------
def create_from_images_cond_continuous(tfrecord_dir, image_dir, shuffle):
print('Loading images from "%s"' % image_dir)
image_filenames = sorted(glob.glob(os.path.join(image_dir, '*')))
if len(image_filenames) == 0:
error('No input images found')
img = np.asarray(PIL.Image.open(image_filenames[0]))
resolution = img.shape[0]
channels = img.shape[2] if img.ndim == 3 else 1
if img.shape[1] != resolution:
error('Input images must have the same width and height')
if resolution != 2 ** int(np.floor(np.log2(resolution))):
error('Input image resolution must be a power-of-two')
if channels not in [1, 3]:
error('Input images must be stored as RGB or grayscale')
beauty_rates = load_csv(tfrecord_dir)
with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
order = tfr.choose_shuffled_order() if shuffle else np.arange(len(image_filenames))
for idx in range(order.size):
img = np.asarray(PIL.Image.open(image_filenames[order[idx]]))
if channels == 1:
img = img[np.newaxis, :, :] # HW => CHW
else:
img = img.transpose(2, 0, 1) # HWC => CHW
tfr.add_image(img)
tfr.add_labels(beauty_rates[order])
#----------------------------------------------------------------------------
def create_from_hdf5(tfrecord_dir, hdf5_filename, shuffle):
print('Loading HDF5 archive from "%s"' % hdf5_filename)
import h5py # conda install h5py
with h5py.File(hdf5_filename, 'r') as hdf5_file:
hdf5_data = max([value for key, value in hdf5_file.items() if key.startswith('data')], key=lambda lod: lod.shape[3])
with TFRecordExporter(tfrecord_dir, hdf5_data.shape[0]) as tfr:
order = tfr.choose_shuffled_order() if shuffle else np.arange(hdf5_data.shape[0])
for idx in range(order.size):
tfr.add_image(hdf5_data[order[idx]])
npy_filename = os.path.splitext(hdf5_filename)[0] + '-labels.npy'
if os.path.isfile(npy_filename):
tfr.add_labels(np.load(npy_filename)[order])
#----------------------------------------------------------------------------
def execute_cmdline(argv):
prog = argv[0]
parser = argparse.ArgumentParser(
prog = prog,
description = 'Tool for creating, extracting, and visualizing Progressive GAN datasets.',
epilog = 'Type "%s <command> -h" for more information.' % prog)
subparsers = parser.add_subparsers(dest='command')
subparsers.required = True
def add_command(cmd, desc, example=None):
epilog = 'Example: %s %s' % (prog, example) if example is not None else None
return subparsers.add_parser(cmd, description=desc, help=desc, epilog=epilog)
p = add_command( 'display', 'Display images in dataset.',
'display datasets/mnist')
p.add_argument( 'tfrecord_dir', help='Directory containing dataset')
p = add_command( 'extract', 'Extract images from dataset.',
'extract datasets/mnist mnist-images')
p.add_argument( 'tfrecord_dir', help='Directory containing dataset')
p.add_argument( 'output_dir', help='Directory to extract the images into')
p = add_command( 'compare', 'Compare two datasets.',
'compare datasets/mydataset datasets/mnist')
p.add_argument( 'tfrecord_dir_a', help='Directory containing first dataset')
p.add_argument( 'tfrecord_dir_b', help='Directory containing second dataset')
p.add_argument( '--ignore_labels', help='Ignore labels (default: 0)', type=int, default=0)
p = add_command( 'create_mnist', 'Create dataset for MNIST.',
'create_mnist datasets/mnist ~/downloads/mnist')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'mnist_dir', help='Directory containing MNIST')
p = add_command( 'create_mnistrgb', 'Create dataset for MNIST-RGB.',
'create_mnistrgb datasets/mnistrgb ~/downloads/mnist')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'mnist_dir', help='Directory containing MNIST')
p.add_argument( '--num_images', help='Number of composite images to create (default: 1000000)', type=int, default=1000000)
p.add_argument( '--random_seed', help='Random seed (default: 123)', type=int, default=123)
p = add_command( 'create_cifar10', 'Create dataset for CIFAR-10.',
'create_cifar10 datasets/cifar10 ~/downloads/cifar10')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'cifar10_dir', help='Directory containing CIFAR-10')
p = add_command( 'create_cifar100', 'Create dataset for CIFAR-100.',
'create_cifar100 datasets/cifar100 ~/downloads/cifar100')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'cifar100_dir', help='Directory containing CIFAR-100')
p = add_command( 'create_svhn', 'Create dataset for SVHN.',
'create_svhn datasets/svhn ~/downloads/svhn')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'svhn_dir', help='Directory containing SVHN')
p = add_command( 'create_lsun', 'Create dataset for single LSUN category.',
'create_lsun datasets/lsun-car-100k ~/downloads/lsun/car_lmdb --resolution 256 --max_images 100000')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'lmdb_dir', help='Directory containing LMDB database')
p.add_argument( '--resolution', help='Output resolution (default: 256)', type=int, default=256)
p.add_argument( '--max_images', help='Maximum number of images (default: none)', type=int, default=None)
p = add_command( 'create_celeba', 'Create dataset for CelebA.',
'create_celeba datasets/celeba ~/downloads/celeba')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'celeba_dir', help='Directory containing CelebA')
p.add_argument( '--cx', help='Center X coordinate (default: 89)', type=int, default=89)
p.add_argument( '--cy', help='Center Y coordinate (default: 121)', type=int, default=121)
p = add_command( 'create_celebahq', 'Create dataset for CelebA-HQ.',
'create_celebahq datasets/celebahq ~/downloads/celeba ~/downloads/celeba-hq-deltas')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'celeba_dir', help='Directory containing CelebA')
p.add_argument( 'delta_dir', help='Directory containing CelebA-HQ deltas')
p.add_argument( '--num_threads', help='Number of concurrent threads (default: 4)', type=int, default=4)
p.add_argument( '--num_tasks', help='Number of concurrent processing tasks (default: 100)', type=int, default=100)
p = add_command( 'create_celebahq_cond', 'Create dataset for CelebA-HQ with conditioning.',
'create_celebahq_cond datasets/celebahq ~/downloads/celeba ~/downloads/celeba-hq-deltas')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'celeba_dir', help='Directory containing CelebA')
p.add_argument( 'delta_dir', help='Directory containing CelebA-HQ deltas')
p.add_argument( '--num_threads', help='Number of concurrent threads (default: 4)', type=int, default=4)
p.add_argument( '--num_tasks', help='Number of concurrent processing tasks (default: 100)', type=int, default=100)
p.add_argument( '--save_images', help='Save CelebA-HQ images if requested (default: False)', action='store_true')
p = add_command( 'create_celebahq_cond_continuous', 'Create dataset for CelebA-HQ with continuous conditioning.',
                                            'create_celebahq_cond_continuous datasets/celebahq ~/downloads/celeba ~/downloads/celeba-hq-deltas')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'celeba_dir', help='Directory containing CelebA')
p.add_argument( 'delta_dir', help='Directory containing CelebA-HQ deltas')
p.add_argument( '--num_threads', help='Number of concurrent threads (default: 4)', type=int, default=4)
p.add_argument( '--num_tasks', help='Number of concurrent processing tasks (default: 100)', type=int, default=100)
p.add_argument( '--save_images', help='Save CelebA-HQ images if requested (default: False)', action='store_true')
p = add_command( 'create_from_images', 'Create dataset from a directory full of images.',
'create_from_images datasets/mydataset myimagedir')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'image_dir', help='Directory containing the images')
p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1)
p = add_command( 'create_from_images_cond', 'Create dataset from a directory full of images with conditioning.',
'create_from_images_cond datasets/mydataset myimagedir')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument(     'image_dir',        help='Directory containing the images; the ratings csv file should be adjacent to them')
p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1)
p = add_command( 'create_from_images_cond_continuous', 'Create dataset from a directory full of images with continuous conditioning.',
                                            'create_from_images_cond_continuous datasets/mydataset myimagedir')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument(     'image_dir',        help='Directory containing the images; the ratings csv file should be adjacent to them')
p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1)
p = add_command( 'create_from_hdf5', 'Create dataset from legacy HDF5 archive.',
'create_from_hdf5 datasets/celebahq ~/downloads/celeba-hq-1024x1024.h5')
p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
p.add_argument( 'hdf5_filename', help='HDF5 archive containing the images')
p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1)
args = parser.parse_args(argv[1:] if len(argv) > 1 else ['-h'])
func = globals()[args.command]
del args.command
func(**vars(args))
#----------------------------------------------------------------------------
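# Example invocation (matching the epilogs above; the file name is assumed):
#   python dataset_tool.py create_from_images datasets/mydataset myimagedir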
if __name__ == "__main__":
execute_cmdline(sys.argv)
#----------------------------------------------------------------------------
| 77,822 | 47.038889 | 163 | py |
Beholder-GAN | Beholder-GAN-master/train.py | #Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
#Attribution-NonCommercial 4.0 International (CC BY-NC 4.0).
#Full license text: https://creativecommons.org/licenses/by-nc/4.0/legalcode
#
import os
import time
import numpy as np
import tensorflow as tf
import config
import tfutil
import dataset
import misc
#----------------------------------------------------------------------------
# Choose the size and contents of the image snapshot grids that are exported
# periodically during training.
def setup_snapshot_image_grid(G, training_set,
size = '1080p', # '1080p' = to be viewed on 1080p display, '4k' = to be viewed on 4k display.
layout = 'random'): # 'random' = grid contents are selected randomly, 'row_per_class' = each row corresponds to one class label.
# Select size.
gw = 1; gh = 1
if size == '1080p':
gw = np.clip(1920 // G.output_shape[3], 3, 32)
gh = np.clip(1080 // G.output_shape[2], 2, 32)
if size == '4k':
gw = np.clip(3840 // G.output_shape[3], 7, 32)
gh = np.clip(2160 // G.output_shape[2], 4, 32)
# Fill in reals and labels.
reals = np.zeros([gw * gh] + training_set.shape, dtype=training_set.dtype)
labels = np.zeros([gw * gh, training_set.label_size], dtype=training_set.label_dtype)
for idx in range(gw * gh):
x = idx % gw; y = idx // gw
while True:
real, label = training_set.get_minibatch_np(1)
if layout == 'row_per_class' and training_set.label_size > 0:
if label[0, y % training_set.label_size] == 0.0:
continue
reals[idx] = real[0]
labels[idx] = label[0]
break
# Generate latents.
latents = misc.random_latents(gw * gh, G)
#print("latents: {}".format(latents))
return (gw, gh), reals, labels, latents
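# Example: for 256x256 generator outputs and size='1080p', gw = clip(1920 //
# 256, 3, 32) = 7 and gh = clip(1080 // 256, 2, 32) = 4, i.e. a 7x4 grid.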
#----------------------------------------------------------------------------
# Just-in-time processing of training images before feeding them to the networks.
def process_reals(x, lod, mirror_augment, drange_data, drange_net):
with tf.name_scope('ProcessReals'):
with tf.name_scope('DynamicRange'):
x = tf.cast(x, tf.float32)
x = misc.adjust_dynamic_range(x, drange_data, drange_net)
if mirror_augment:
with tf.name_scope('MirrorAugment'):
s = tf.shape(x)
mask = tf.random_uniform([s[0], 1, 1, 1], 0.0, 1.0)
mask = tf.tile(mask, [1, s[1], s[2], s[3]])
x = tf.where(mask < 0.5, x, tf.reverse(x, axis=[3]))
with tf.name_scope('FadeLOD'): # Smooth crossfade between consecutive levels-of-detail.
s = tf.shape(x)
y = tf.reshape(x, [-1, s[1], s[2]//2, 2, s[3]//2, 2])
y = tf.reduce_mean(y, axis=[3, 5], keepdims=True)
y = tf.tile(y, [1, 1, 1, 2, 1, 2])
y = tf.reshape(y, [-1, s[1], s[2], s[3]])
x = tfutil.lerp(x, y, lod - tf.floor(lod))
with tf.name_scope('UpscaleLOD'): # Upscale to match the expected input/output size of the networks.
s = tf.shape(x)
factor = tf.cast(2 ** tf.floor(lod), tf.int32)
x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
x = tf.tile(x, [1, 1, 1, factor, 1, factor])
x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
return x
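# Example: at lod = 2.3 the crossfade weight is lod - floor(lod) = 0.3, so 30%
# of the 2x-downsampled image is blended in, and the final upscale factor is
# 2 ** floor(lod) = 4.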
#----------------------------------------------------------------------------
# Class for evaluating and storing the values of time-varying training parameters.
class TrainingSchedule:
def __init__(
self,
cur_nimg,
training_set,
lod_initial_resolution = 4, # Image resolution used at the beginning.
lod_training_kimg = 600, # Thousands of real images to show before doubling the resolution.
lod_transition_kimg = 600, # Thousands of real images to show when fading in new layers.
minibatch_base = 16, # Maximum minibatch size, divided evenly among GPUs.
minibatch_dict = {}, # Resolution-specific overrides.
max_minibatch_per_gpu = {}, # Resolution-specific maximum minibatch size per GPU.
G_lrate_base = 0.001, # Learning rate for the generator.
G_lrate_dict = {}, # Resolution-specific overrides.
D_lrate_base = 0.001, # Learning rate for the discriminator.
D_lrate_dict = {}, # Resolution-specific overrides.
tick_kimg_base = 160, # Default interval of progress snapshots.
tick_kimg_dict = {4: 160, 8:140, 16:120, 32:100, 64:80, 128:60, 256:40, 512:20, 1024:10}): # Resolution-specific overrides.
# Training phase.
self.kimg = cur_nimg / 1000.0
phase_dur = lod_training_kimg + lod_transition_kimg
phase_idx = int(np.floor(self.kimg / phase_dur)) if phase_dur > 0 else 0
phase_kimg = self.kimg - phase_idx * phase_dur
# Level-of-detail and resolution.
self.lod = training_set.resolution_log2
self.lod -= np.floor(np.log2(lod_initial_resolution))
self.lod -= phase_idx
if lod_transition_kimg > 0:
self.lod -= max(phase_kimg - lod_training_kimg, 0.0) / lod_transition_kimg
self.lod = max(self.lod, 0.0)
self.resolution = 2 ** (training_set.resolution_log2 - int(np.floor(self.lod)))
# Minibatch size.
self.minibatch = minibatch_dict.get(self.resolution, minibatch_base)
self.minibatch -= self.minibatch % config.num_gpus
if self.resolution in max_minibatch_per_gpu:
self.minibatch = min(self.minibatch, max_minibatch_per_gpu[self.resolution] * config.num_gpus)
# Other parameters.
self.G_lrate = G_lrate_dict.get(self.resolution, G_lrate_base)
self.D_lrate = D_lrate_dict.get(self.resolution, D_lrate_base)
self.tick_kimg = tick_kimg_dict.get(self.resolution, tick_kimg_base)
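        # Example with the defaults above and a 1024x1024 training set
        # (resolution_log2 = 10): at kimg = 1500, phase_dur = 1200, so
        # phase_idx = 1 and phase_kimg = 300; lod = 10 - log2(4) - 1 = 7 (the
        # transition term is zero since 300 < lod_training_kimg), giving
        # resolution = 2 ** (10 - 7) = 8.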
#----------------------------------------------------------------------------
# Main training script.
# To run, comment/uncomment appropriate lines in config.py and launch train.py.
def train_progressive_gan(
G_smoothing = 0.999, # Exponential running average of generator weights.
D_repeats = 1, # How many times the discriminator is trained per G iteration.
minibatch_repeats = 4, # Number of minibatches to run before adjusting training parameters.
reset_opt_for_new_lod = True, # Reset optimizer internal state (e.g. Adam moments) when new layers are introduced?
total_kimg = 15000, # Total length of the training, measured in thousands of real images.
mirror_augment = False, # Enable mirror augment?
drange_net = [-1,1], # Dynamic range used when feeding image data to the networks.
image_snapshot_ticks = 1, # How often to export image snapshots?
network_snapshot_ticks = 10, # How often to export network snapshots?
save_tf_graph = False, # Include full TensorFlow computation graph in the tfevents file?
save_weight_histograms = False, # Include weight histograms in the tfevents file?
resume_run_id = None, # Run ID or network pkl to resume training from, None = start from scratch.
resume_snapshot = None, # Snapshot index to resume training from, None = autodetect.
resume_kimg = 0.0, # Assumed training progress at the beginning. Affects reporting and training schedule.
resume_time = 0.0): # Assumed wallclock time at the beginning. Affects reporting.
maintenance_start_time = time.time()
training_set = dataset.load_dataset(data_dir=config.data_dir, verbose=True, **config.dataset)
# Construct networks.
with tf.device('/gpu:0'):
if resume_run_id is not None:
network_pkl = misc.locate_network_pkl(resume_run_id, resume_snapshot)
print('Loading networks from "%s"...' % network_pkl)
G, D, Gs = misc.load_pkl(network_pkl)
else:
print('Constructing networks...')
G = tfutil.Network('G', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **config.G)
D = tfutil.Network('D', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **config.D)
Gs = G.clone('Gs')
Gs_update_op = Gs.setup_as_moving_average_of(G, beta=G_smoothing)
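        # Gs tracks an exponential moving average of G's weights
        # (beta = G_smoothing = 0.999); the image grids below are generated
        # from Gs rather than from the raw generator.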
G.print_layers(); D.print_layers()
print('Building TensorFlow graph...')
with tf.name_scope('Inputs'):
lod_in = tf.placeholder(tf.float32, name='lod_in', shape=[])
lrate_in = tf.placeholder(tf.float32, name='lrate_in', shape=[])
minibatch_in = tf.placeholder(tf.int32, name='minibatch_in', shape=[])
minibatch_split = minibatch_in // config.num_gpus
reals, labels = training_set.get_minibatch_tf()
reals_split = tf.split(reals, config.num_gpus)
labels_split = tf.split(labels, config.num_gpus)
G_opt = tfutil.Optimizer(name='TrainG', learning_rate=lrate_in, **config.G_opt)
D_opt = tfutil.Optimizer(name='TrainD', learning_rate=lrate_in, **config.D_opt)
for gpu in range(config.num_gpus):
with tf.name_scope('GPU%d' % gpu), tf.device('/gpu:%d' % gpu):
G_gpu = G if gpu == 0 else G.clone(G.name + '_shadow')
D_gpu = D if gpu == 0 else D.clone(D.name + '_shadow')
lod_assign_ops = [tf.assign(G_gpu.find_var('lod'), lod_in), tf.assign(D_gpu.find_var('lod'), lod_in)]
reals_gpu = process_reals(reals_split[gpu], lod_in, mirror_augment, training_set.dynamic_range, drange_net)
labels_gpu = labels_split[gpu]
with tf.name_scope('G_loss'), tf.control_dependencies(lod_assign_ops):
G_loss = tfutil.call_func_by_name(G=G_gpu, D=D_gpu, opt=G_opt, training_set=training_set, minibatch_size=minibatch_split, **config.G_loss)
with tf.name_scope('D_loss'), tf.control_dependencies(lod_assign_ops):
D_loss = tfutil.call_func_by_name(G=G_gpu, D=D_gpu, opt=D_opt, training_set=training_set, minibatch_size=minibatch_split, reals=reals_gpu, labels=labels_gpu, **config.D_loss)
G_opt.register_gradients(tf.reduce_mean(G_loss), G_gpu.trainables)
D_opt.register_gradients(tf.reduce_mean(D_loss), D_gpu.trainables)
G_train_op = G_opt.apply_updates()
D_train_op = D_opt.apply_updates()
print('Setting up snapshot image grid...')
grid_size, grid_reals, grid_labels, grid_latents = setup_snapshot_image_grid(G, training_set, **config.grid)
sched = TrainingSchedule(total_kimg * 1000, training_set, **config.sched)
grid_fakes = Gs.run(grid_latents, grid_labels, minibatch_size=sched.minibatch//config.num_gpus)
print('Setting up result dir...')
result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
misc.save_image_grid(grid_reals, os.path.join(result_subdir, 'reals.png'), drange=training_set.dynamic_range, grid_size=grid_size)
misc.save_image_grid(grid_fakes, os.path.join(result_subdir, 'fakes%06d.png' % 0), drange=drange_net, grid_size=grid_size)
summary_log = tf.summary.FileWriter(result_subdir)
if save_tf_graph:
summary_log.add_graph(tf.get_default_graph())
if save_weight_histograms:
G.setup_weight_histograms(); D.setup_weight_histograms()
print('Training...')
cur_nimg = int(resume_kimg * 1000)
cur_tick = 0
tick_start_nimg = cur_nimg
tick_start_time = time.time()
train_start_time = tick_start_time - resume_time
prev_lod = -1.0
while cur_nimg < total_kimg * 1000:
# Choose training parameters and configure training ops.
sched = TrainingSchedule(cur_nimg, training_set, **config.sched)
training_set.configure(sched.minibatch, sched.lod)
if reset_opt_for_new_lod:
if np.floor(sched.lod) != np.floor(prev_lod) or np.ceil(sched.lod) != np.ceil(prev_lod):
G_opt.reset_optimizer_state(); D_opt.reset_optimizer_state()
prev_lod = sched.lod
# Run training ops.
for repeat in range(minibatch_repeats):
for _ in range(D_repeats):
tfutil.run([D_train_op, Gs_update_op], {lod_in: sched.lod, lrate_in: sched.D_lrate, minibatch_in: sched.minibatch})
cur_nimg += sched.minibatch
tfutil.run([G_train_op], {lod_in: sched.lod, lrate_in: sched.G_lrate, minibatch_in: sched.minibatch})
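            # Each pass runs D_repeats discriminator updates (each paired with
            # a Gs moving-average update and advancing cur_nimg by one
            # minibatch) followed by a single generator update.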
# Perform maintenance tasks once per tick.
done = (cur_nimg >= total_kimg * 1000)
if cur_nimg >= tick_start_nimg + sched.tick_kimg * 1000 or done:
cur_tick += 1
cur_time = time.time()
tick_kimg = (cur_nimg - tick_start_nimg) / 1000.0
tick_start_nimg = cur_nimg
tick_time = cur_time - tick_start_time
total_time = cur_time - train_start_time
maintenance_time = tick_start_time - maintenance_start_time
maintenance_start_time = cur_time
# Report progress.
print('tick %-5d kimg %-8.1f lod %-5.2f minibatch %-4d time %-12s sec/tick %-7.1f sec/kimg %-7.2f maintenance %.1f' % (
tfutil.autosummary('Progress/tick', cur_tick),
tfutil.autosummary('Progress/kimg', cur_nimg / 1000.0),
tfutil.autosummary('Progress/lod', sched.lod),
tfutil.autosummary('Progress/minibatch', sched.minibatch),
misc.format_time(tfutil.autosummary('Timing/total_sec', total_time)),
tfutil.autosummary('Timing/sec_per_tick', tick_time),
tfutil.autosummary('Timing/sec_per_kimg', tick_time / tick_kimg),
tfutil.autosummary('Timing/maintenance_sec', maintenance_time)))
tfutil.autosummary('Timing/total_hours', total_time / (60.0 * 60.0))
tfutil.autosummary('Timing/total_days', total_time / (24.0 * 60.0 * 60.0))
tfutil.save_summaries(summary_log, cur_nimg)
# Save snapshots.
if cur_tick % image_snapshot_ticks == 0 or done:
grid_fakes = Gs.run(grid_latents, grid_labels, minibatch_size=sched.minibatch//config.num_gpus)
misc.save_image_grid(grid_fakes, os.path.join(result_subdir, 'fakes%06d.png' % (cur_nimg // 1000)), drange=drange_net, grid_size=grid_size)
if cur_tick % network_snapshot_ticks == 0 or done:
misc.save_pkl((G, D, Gs), os.path.join(result_subdir, 'network-snapshot-%06d.pkl' % (cur_nimg // 1000)))
# Record start time of the next tick.
tick_start_time = time.time()
# Write final results.
misc.save_pkl((G, D, Gs), os.path.join(result_subdir, 'network-final.pkl'))
summary_log.close()
open(os.path.join(result_subdir, '_training-done.txt'), 'wt').close()
#----------------------------------------------------------------------------
# Main entry point.
# Calls the function indicated in config.py.
if __name__ == "__main__":
misc.init_output_logging()
np.random.seed(config.random_seed)
print('Initializing TensorFlow...')
os.environ.update(config.env)
tfutil.init_tf(config.tf_config)
print('Running %s()...' % config.train['func'])
tfutil.call_func_by_name(**config.train)
print('Exiting...')
#----------------------------------------------------------------------------
| 35,370 | 49.747489 | 190 | py |
Beholder-GAN | Beholder-GAN-master/util_scripts.py | #Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
#
#Attribution-NonCommercial 4.0 International
#
#=======================================================================
#
#Creative Commons Corporation ("Creative Commons") is not a law firm and
#does not provide legal services or legal advice. Distribution of
#Creative Commons public licenses does not create a lawyer-client or
#other relationship. Creative Commons makes its licenses and related
#information available on an "as-is" basis. Creative Commons gives no
#warranties regarding its licenses, any material licensed under their
#terms and conditions, or any related information. Creative Commons
#disclaims all liability for damages resulting from their use to the
#fullest extent possible.
#
#Using Creative Commons Public Licenses
#
#Creative Commons public licenses provide a standard set of terms and
#conditions that creators and other rights holders may use to share
#original works of authorship and other material subject to copyright
#and certain other rights specified in the public license below. The
#following considerations are for informational purposes only, are not
#exhaustive, and do not form part of our licenses.
#
# Considerations for licensors: Our public licenses are
# intended for use by those authorized to give the public
# permission to use material in ways otherwise restricted by
# copyright and certain other rights. Our licenses are
# irrevocable. Licensors should read and understand the terms
# and conditions of the license they choose before applying it.
# Licensors should also secure all rights necessary before
# applying our licenses so that the public can reuse the
# material as expected. Licensors should clearly mark any
# material not subject to the license. This includes other CC-
# licensed material, or material used under an exception or
# limitation to copyright. More considerations for licensors:
# wiki.creativecommons.org/Considerations_for_licensors
#
# Considerations for the public: By using one of our public
# licenses, a licensor grants the public permission to use the
# licensed material under specified terms and conditions. If
# the licensor's permission is not necessary for any reason--for
# example, because of any applicable exception or limitation to
# copyright--then that use is not regulated by the license. Our
# licenses grant only permissions under copyright and certain
# other rights that a licensor has authority to grant. Use of
# the licensed material may still be restricted for other
# reasons, including because others have copyright or other
# rights in the material. A licensor may make special requests,
# such as asking that all changes be marked or described.
# Although not required by our licenses, you are encouraged to
# respect those requests where reasonable. More_considerations
# for the public:
# wiki.creativecommons.org/Considerations_for_licensees
#
#=======================================================================
#
#Creative Commons Attribution-NonCommercial 4.0 International Public
#License
#
#By exercising the Licensed Rights (defined below), You accept and agree
#to be bound by the terms and conditions of this Creative Commons
#Attribution-NonCommercial 4.0 International Public License ("Public
#License"). To the extent this Public License may be interpreted as a
#contract, You are granted the Licensed Rights in consideration of Your
#acceptance of these terms and conditions, and the Licensor grants You
#such rights in consideration of benefits the Licensor receives from
#making the Licensed Material available under these terms and
#conditions.
#
#
#Section 1 -- Definitions.
#
# a. Adapted Material means material subject to Copyright and Similar
# Rights that is derived from or based upon the Licensed Material
# and in which the Licensed Material is translated, altered,
# arranged, transformed, or otherwise modified in a manner requiring
# permission under the Copyright and Similar Rights held by the
# Licensor. For purposes of this Public License, where the Licensed
# Material is a musical work, performance, or sound recording,
# Adapted Material is always produced where the Licensed Material is
# synched in timed relation with a moving image.
#
# b. Adapter's License means the license You apply to Your Copyright
# and Similar Rights in Your contributions to Adapted Material in
# accordance with the terms and conditions of this Public License.
#
# c. Copyright and Similar Rights means copyright and/or similar rights
# closely related to copyright including, without limitation,
# performance, broadcast, sound recording, and Sui Generis Database
# Rights, without regard to how the rights are labeled or
# categorized. For purposes of this Public License, the rights
# specified in Section 2(b)(1)-(2) are not Copyright and Similar
# Rights.
# d. Effective Technological Measures means those measures that, in the
# absence of proper authority, may not be circumvented under laws
# fulfilling obligations under Article 11 of the WIPO Copyright
# Treaty adopted on December 20, 1996, and/or similar international
# agreements.
#
# e. Exceptions and Limitations means fair use, fair dealing, and/or
# any other exception or limitation to Copyright and Similar Rights
# that applies to Your use of the Licensed Material.
#
# f. Licensed Material means the artistic or literary work, database,
# or other material to which the Licensor applied this Public
# License.
#
# g. Licensed Rights means the rights granted to You subject to the
# terms and conditions of this Public License, which are limited to
# all Copyright and Similar Rights that apply to Your use of the
# Licensed Material and that the Licensor has authority to license.
#
# h. Licensor means the individual(s) or entity(ies) granting rights
# under this Public License.
#
# i. NonCommercial means not primarily intended for or directed towards
# commercial advantage or monetary compensation. For purposes of
# this Public License, the exchange of the Licensed Material for
# other material subject to Copyright and Similar Rights by digital
# file-sharing or similar means is NonCommercial provided there is
# no payment of monetary compensation in connection with the
# exchange.
#
# j. Share means to provide material to the public by any means or
# process that requires permission under the Licensed Rights, such
# as reproduction, public display, public performance, distribution,
# dissemination, communication, or importation, and to make material
# available to the public including in ways that members of the
# public may access the material from a place and at a time
# individually chosen by them.
#
# k. Sui Generis Database Rights means rights other than copyright
# resulting from Directive 96/9/EC of the European Parliament and of
# the Council of 11 March 1996 on the legal protection of databases,
# as amended and/or succeeded, as well as other essentially
# equivalent rights anywhere in the world.
#
# l. You means the individual or entity exercising the Licensed Rights
# under this Public License. Your has a corresponding meaning.
#
#
#Section 2 -- Scope.
#
# a. License grant.
#
# 1. Subject to the terms and conditions of this Public License,
# the Licensor hereby grants You a worldwide, royalty-free,
# non-sublicensable, non-exclusive, irrevocable license to
# exercise the Licensed Rights in the Licensed Material to:
#
# a. reproduce and Share the Licensed Material, in whole or
# in part, for NonCommercial purposes only; and
#
# b. produce, reproduce, and Share Adapted Material for
# NonCommercial purposes only.
#
# 2. Exceptions and Limitations. For the avoidance of doubt, where
# Exceptions and Limitations apply to Your use, this Public
# License does not apply, and You do not need to comply with
# its terms and conditions.
#
# 3. Term. The term of this Public License is specified in Section
# 6(a).
#
# 4. Media and formats; technical modifications allowed. The
# Licensor authorizes You to exercise the Licensed Rights in
# all media and formats whether now known or hereafter created,
# and to make technical modifications necessary to do so. The
# Licensor waives and/or agrees not to assert any right or
# authority to forbid You from making technical modifications
# necessary to exercise the Licensed Rights, including
# technical modifications necessary to circumvent Effective
# Technological Measures. For purposes of this Public License,
# simply making modifications authorized by this Section 2(a)
# (4) never produces Adapted Material.
#
# 5. Downstream recipients.
#
# a. Offer from the Licensor -- Licensed Material. Every
# recipient of the Licensed Material automatically
# receives an offer from the Licensor to exercise the
# Licensed Rights under the terms and conditions of this
# Public License.
#
# b. No downstream restrictions. You may not offer or impose
# any additional or different terms or conditions on, or
# apply any Effective Technological Measures to, the
# Licensed Material if doing so restricts exercise of the
# Licensed Rights by any recipient of the Licensed
# Material.
#
# 6. No endorsement. Nothing in this Public License constitutes or
# may be construed as permission to assert or imply that You
# are, or that Your use of the Licensed Material is, connected
# with, or sponsored, endorsed, or granted official status by,
# the Licensor or others designated to receive attribution as
# provided in Section 3(a)(1)(A)(i).
#
# b. Other rights.
#
# 1. Moral rights, such as the right of integrity, are not
# licensed under this Public License, nor are publicity,
# privacy, and/or other similar personality rights; however, to
# the extent possible, the Licensor waives and/or agrees not to
# assert any such rights held by the Licensor to the limited
# extent necessary to allow You to exercise the Licensed
# Rights, but not otherwise.
#
# 2. Patent and trademark rights are not licensed under this
# Public License.
#
# 3. To the extent possible, the Licensor waives any right to
# collect royalties from You for the exercise of the Licensed
# Rights, whether directly or through a collecting society
# under any voluntary or waivable statutory or compulsory
# licensing scheme. In all other cases the Licensor expressly
# reserves any right to collect such royalties, including when
# the Licensed Material is used other than for NonCommercial
# purposes.
#
#
#Section 3 -- License Conditions.
#
#Your exercise of the Licensed Rights is expressly made subject to the
#following conditions.
#
# a. Attribution.
#
# 1. If You Share the Licensed Material (including in modified
# form), You must:
#
# a. retain the following if it is supplied by the Licensor
# with the Licensed Material:
#
# i. identification of the creator(s) of the Licensed
# Material and any others designated to receive
# attribution, in any reasonable manner requested by
# the Licensor (including by pseudonym if
# designated);
#
# ii. a copyright notice;
#
# iii. a notice that refers to this Public License;
#
# iv. a notice that refers to the disclaimer of
# warranties;
#
# v. a URI or hyperlink to the Licensed Material to the
# extent reasonably practicable;
#
# b. indicate if You modified the Licensed Material and
# retain an indication of any previous modifications; and
#
# c. indicate the Licensed Material is licensed under this
# Public License, and include the text of, or the URI or
# hyperlink to, this Public License.
#
# 2. You may satisfy the conditions in Section 3(a)(1) in any
# reasonable manner based on the medium, means, and context in
# which You Share the Licensed Material. For example, it may be
# reasonable to satisfy the conditions by providing a URI or
# hyperlink to a resource that includes the required
# information.
#
# 3. If requested by the Licensor, You must remove any of the
# information required by Section 3(a)(1)(A) to the extent
# reasonably practicable.
#
# 4. If You Share Adapted Material You produce, the Adapter's
# License You apply must not prevent recipients of the Adapted
# Material from complying with this Public License.
#
#
#Section 4 -- Sui Generis Database Rights.
#
#Where the Licensed Rights include Sui Generis Database Rights that
#apply to Your use of the Licensed Material:
#
# a. for the avoidance of doubt, Section 2(a)(1) grants You the right
# to extract, reuse, reproduce, and Share all or a substantial
# portion of the contents of the database for NonCommercial purposes
# only;
#
# b. if You include all or a substantial portion of the database
# contents in a database in which You have Sui Generis Database
# Rights, then the database in which You have Sui Generis Database
# Rights (but not its individual contents) is Adapted Material; and
#
# c. You must comply with the conditions in Section 3(a) if You Share
# all or a substantial portion of the contents of the database.
#
#For the avoidance of doubt, this Section 4 supplements and does not
#replace Your obligations under this Public License where the Licensed
#Rights include other Copyright and Similar Rights.
#
#
#Section 5 -- Disclaimer of Warranties and Limitation of Liability.
#
# a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
# EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
# AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
# ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
# IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
# WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
# ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
# KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
# ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
#
# b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
# TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
# NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
# INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
# COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
# USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
# ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
# DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
# IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
#
# c. The disclaimer of warranties and limitation of liability provided
# above shall be interpreted in a manner that, to the extent
# possible, most closely approximates an absolute disclaimer and
# waiver of all liability.
#
#
#Section 6 -- Term and Termination.
#
# a. This Public License applies for the term of the Copyright and
# Similar Rights licensed here. However, if You fail to comply with
# this Public License, then Your rights under this Public License
# terminate automatically.
#
# b. Where Your right to use the Licensed Material has terminated under
# Section 6(a), it reinstates:
#
# 1. automatically as of the date the violation is cured, provided
# it is cured within 30 days of Your discovery of the
# violation; or
#
# 2. upon express reinstatement by the Licensor.
#
# For the avoidance of doubt, this Section 6(b) does not affect any
# right the Licensor may have to seek remedies for Your violations
# of this Public License.
#
# c. For the avoidance of doubt, the Licensor may also offer the
# Licensed Material under separate terms or conditions or stop
# distributing the Licensed Material at any time; however, doing so
# will not terminate this Public License.
#
# d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
# License.
#
#
#Section 7 -- Other Terms and Conditions.
#
# a. The Licensor shall not be bound by any additional or different
# terms or conditions communicated by You unless expressly agreed.
#
# b. Any arrangements, understandings, or agreements regarding the
# Licensed Material not stated herein are separate from and
# independent of the terms and conditions of this Public License.
#
#
#Section 8 -- Interpretation.
#
# a. For the avoidance of doubt, this Public License does not, and
# shall not be interpreted to, reduce, limit, restrict, or impose
# conditions on any use of the Licensed Material that could lawfully
# be made without permission under this Public License.
#
# b. To the extent possible, if any provision of this Public License is
# deemed unenforceable, it shall be automatically reformed to the
# minimum extent necessary to make it enforceable. If the provision
# cannot be reformed, it shall be severed from this Public License
# without affecting the enforceability of the remaining terms and
# conditions.
#
# c. No term or condition of this Public License will be waived and no
# failure to comply consented to unless expressly agreed to by the
# Licensor.
#
# d. Nothing in this Public License constitutes or may be interpreted
# as a limitation upon, or waiver of, any privileges and immunities
# that apply to the Licensor or You, including from the legal
# processes of any jurisdiction or authority.
#
#=======================================================================
#
#Creative Commons is not a party to its public
#licenses. Notwithstanding, Creative Commons may elect to apply one of
#its public licenses to material it publishes and in those instances
#will be considered the "Licensor." The text of the Creative Commons
#public licenses is dedicated to the public domain under the CC0 Public
#Domain Dedication. Except for the limited purpose of indicating that
#material is shared under a Creative Commons public license or as
#otherwise permitted by the Creative Commons policies published at
#creativecommons.org/policies, Creative Commons does not authorize the
#use of the trademark "Creative Commons" or any other trademark or logo
#of Creative Commons without its prior written consent including,
#without limitation, in connection with any unauthorized modifications
#to any of its public licenses or any other arrangements,
#understandings, or agreements concerning use of licensed material. For
#the avoidance of doubt, this paragraph does not form part of the
#public licenses.
#
#Creative Commons may be contacted at creativecommons.org.
import os
import time
import re
import bisect
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import scipy.ndimage
import scipy.misc
import config
import misc
import tfutil
import train
import dataset
#----------------------------------------------------------------------------
# Generate random images or image grids using a previously trained network.
# To run, uncomment the appropriate line in config.py and launch train.py.
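# An illustrative config.py line (values below are placeholders):
#   train = EasyDict(func='util_scripts.generate_fake_images', run_id=23, num_pngs=1000)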
def generate_fake_images(run_id, snapshot=None, grid_size=[1,1], num_pngs=1, image_shrink=1, png_prefix=None, random_seed=1000, minibatch_size=8):
network_pkl = misc.locate_network_pkl(run_id, snapshot)
if png_prefix is None:
png_prefix = misc.get_id_string_for_network_pkl(network_pkl) + '-'
random_state = np.random.RandomState(random_seed)
print('Loading network from "%s"...' % network_pkl)
G, D, Gs = misc.load_network_pkl(run_id, snapshot)
result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
for png_idx in range(num_pngs):
print('Generating png %d / %d...' % (png_idx, num_pngs))
latents = misc.random_latents(np.prod(grid_size), Gs, random_state=random_state)
labels = np.zeros([latents.shape[0], 0], np.float32)
images = Gs.run(latents, labels, minibatch_size=minibatch_size, num_gpus=config.num_gpus, out_mul=127.5, out_add=127.5, out_shrink=image_shrink, out_dtype=np.uint8)
misc.save_image_grid(images, os.path.join(result_subdir, '%s%06d.png' % (png_prefix, png_idx)), [0,255], grid_size)
open(os.path.join(result_subdir, '_done.txt'), 'wt').close()
#----------------------------------------------------------------------------
# Generate MP4 video of random interpolations using a previously trained network.
# To run, uncomment the appropriate line in config.py and launch train.py.
def generate_interpolation_video(run_id, snapshot=None, grid_size=[1,1], image_shrink=1, image_zoom=1, duration_sec=60.0, smoothing_sec=1.0, mp4=None, mp4_fps=30, mp4_codec='libx264', mp4_bitrate='16M', random_seed=1000, minibatch_size=8):
network_pkl = misc.locate_network_pkl(run_id, snapshot)
if mp4 is None:
mp4 = misc.get_id_string_for_network_pkl(network_pkl) + '-lerp.mp4'
num_frames = int(np.rint(duration_sec * mp4_fps))
random_state = np.random.RandomState(random_seed)
print('Loading network from "%s"...' % network_pkl)
G, D, Gs = misc.load_network_pkl(run_id, snapshot)
print('Generating latent vectors...')
shape = [num_frames, np.prod(grid_size)] + Gs.input_shape[1:] # [frame, image, channel, component]
all_latents = random_state.randn(*shape).astype(np.float32)
all_latents = scipy.ndimage.gaussian_filter(all_latents, [smoothing_sec * mp4_fps] + [0] * len(Gs.input_shape), mode='wrap')
all_latents /= np.sqrt(np.mean(np.square(all_latents)))
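    # The Gaussian blur along the frame axis turns white noise into a smooth
    # random walk through latent space; since blurring shrinks the variance,
    # the division above renormalizes the latents to unit RMS so they stay on
    # the scale the generator was trained with.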
# Frame generation func for moviepy.
def make_frame(t):
frame_idx = int(np.clip(np.round(t * mp4_fps), 0, num_frames - 1))
latents = all_latents[frame_idx]
labels = np.zeros([latents.shape[0], 0], np.float32)
images = Gs.run(latents, labels, minibatch_size=minibatch_size, num_gpus=config.num_gpus, out_mul=127.5, out_add=127.5, out_shrink=image_shrink, out_dtype=np.uint8)
grid = misc.create_image_grid(images, grid_size).transpose(1, 2, 0) # HWC
if image_zoom > 1:
grid = scipy.ndimage.zoom(grid, [image_zoom, image_zoom, 1], order=0)
if grid.shape[2] == 1:
grid = grid.repeat(3, 2) # grayscale => RGB
return grid
# Generate video.
import moviepy.editor # pip install moviepy
result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    moviepy.editor.VideoClip(make_frame, duration=duration_sec).write_videofile(os.path.join(result_subdir, mp4), fps=mp4_fps, codec=mp4_codec, bitrate=mp4_bitrate)
open(os.path.join(result_subdir, '_done.txt'), 'wt').close()
#----------------------------------------------------------------------------
# Generate MP4 video of training progress for a previous training run.
# To run, uncomment the appropriate line in config.py and launch train.py.
def generate_training_video(run_id, duration_sec=20.0, time_warp=1.5, mp4=None, mp4_fps=30, mp4_codec='libx264', mp4_bitrate='16M'):
src_result_subdir = misc.locate_result_subdir(run_id)
if mp4 is None:
mp4 = os.path.basename(src_result_subdir) + '-train.mp4'
# Parse log.
times = []
snaps = [] # [(png, kimg, lod), ...]
with open(os.path.join(src_result_subdir, 'log.txt'), 'rt') as log:
for line in log:
k = re.search(r'kimg ([\d\.]+) ', line)
l = re.search(r'lod ([\d\.]+) ', line)
t = re.search(r'time (\d+d)? *(\d+h)? *(\d+m)? *(\d+s)? ', line)
if k and l and t:
k = float(k.group(1))
l = float(l.group(1))
t = [int(t.group(i)[:-1]) if t.group(i) else 0 for i in range(1, 5)]
t = t[0] * 24*60*60 + t[1] * 60*60 + t[2] * 60 + t[3]
png = os.path.join(src_result_subdir, 'fakes%06d.png' % int(np.floor(k)))
if os.path.isfile(png):
times.append(t)
snaps.append((png, k, l))
assert len(times)
# Frame generation func for moviepy.
png_cache = [None, None] # [png, img]
def make_frame(t):
wallclock = ((t / duration_sec) ** time_warp) * times[-1]
png, kimg, lod = snaps[max(bisect.bisect(times, wallclock) - 1, 0)]
if png_cache[0] == png:
img = png_cache[1]
else:
img = scipy.misc.imread(png)
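            # Downscale oversized grids by repeated 2x2 box filtering (average
            # pooling) until the frame fits within 1920x1080.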
while img.shape[1] > 1920 or img.shape[0] > 1080:
img = img.astype(np.float32).reshape(img.shape[0]//2, 2, img.shape[1]//2, 2, -1).mean(axis=(1,3))
png_cache[:] = [png, img]
img = misc.draw_text_label(img, 'lod %.2f' % lod, 16, img.shape[0]-4, alignx=0.0, aligny=1.0)
img = misc.draw_text_label(img, misc.format_time(int(np.rint(wallclock))), img.shape[1]//2, img.shape[0]-4, alignx=0.5, aligny=1.0)
img = misc.draw_text_label(img, '%.0f kimg' % kimg, img.shape[1]-16, img.shape[0]-4, alignx=1.0, aligny=1.0)
return img
# Generate video.
import moviepy.editor # pip install moviepy
result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    moviepy.editor.VideoClip(make_frame, duration=duration_sec).write_videofile(os.path.join(result_subdir, mp4), fps=mp4_fps, codec=mp4_codec, bitrate=mp4_bitrate)
open(os.path.join(result_subdir, '_done.txt'), 'wt').close()
#----------------------------------------------------------------------------
# Evaluate one or more metrics for a previous training run.
# To run, uncomment one of the appropriate lines in config.py and launch train.py.
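# An illustrative config.py line (values below are placeholders):
#   train = EasyDict(func='util_scripts.evaluate_metrics', run_id=23, log='metric-fid-10k.txt', metrics=['fid'], num_images=10000, real_passes=1)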
def evaluate_metrics(run_id, log, metrics, num_images, real_passes, minibatch_size=None):
metric_class_names = {
'swd': 'metrics.sliced_wasserstein.API',
'fid': 'metrics.frechet_inception_distance.API',
'is': 'metrics.inception_score.API',
'msssim': 'metrics.ms_ssim.API',
}
# Locate training run and initialize logging.
result_subdir = misc.locate_result_subdir(run_id)
snapshot_pkls = misc.list_network_pkls(result_subdir, include_final=False)
assert len(snapshot_pkls) >= 1
log_file = os.path.join(result_subdir, log)
print('Logging output to', log_file)
misc.set_output_log_file(log_file)
# Initialize dataset and select minibatch size.
dataset_obj, mirror_augment = misc.load_dataset_for_previous_run(result_subdir, verbose=True, shuffle_mb=0)
if minibatch_size is None:
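        # Heuristic: scale the minibatch inversely with resolution
        # (e.g. 8192 // 128 = 64 images at 128x128), clipped to [4, 256].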
minibatch_size = np.clip(8192 // dataset_obj.shape[1], 4, 256)
# Initialize metrics.
metric_objs = []
for name in metrics:
class_name = metric_class_names.get(name, name)
print('Initializing %s...' % class_name)
class_def = tfutil.import_obj(class_name)
image_shape = [3] + dataset_obj.shape[1:]
obj = class_def(num_images=num_images, image_shape=image_shape, image_dtype=np.uint8, minibatch_size=minibatch_size)
tfutil.init_uninited_vars()
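        # Warm-up pass: feed a few batches of random uint8 images so that any
        # one-time graph construction inside the metric does not distort the
        # timings measured below.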
mode = 'warmup'
obj.begin(mode)
for idx in range(10):
obj.feed(mode, np.random.randint(0, 256, size=[minibatch_size]+image_shape, dtype=np.uint8))
obj.end(mode)
metric_objs.append(obj)
# Print table header.
print()
print('%-10s%-12s' % ('Snapshot', 'Time_eval'), end='')
for obj in metric_objs:
for name, fmt in zip(obj.get_metric_names(), obj.get_metric_formatting()):
print('%-*s' % (len(fmt % 0), name), end='')
print()
print('%-10s%-12s' % ('---', '---'), end='')
for obj in metric_objs:
for fmt in obj.get_metric_formatting():
print('%-*s' % (len(fmt % 0), '---'), end='')
print()
# Feed in reals.
for title, mode in [('Reals', 'reals'), ('Reals2', 'fakes')][:real_passes]:
print('%-10s' % title, end='')
time_begin = time.time()
labels = np.zeros([num_images, dataset_obj.label_size], dtype=np.float32)
[obj.begin(mode) for obj in metric_objs]
for begin in range(0, num_images, minibatch_size):
end = min(begin + minibatch_size, num_images)
images, labels[begin:end] = dataset_obj.get_minibatch_np(end - begin)
if mirror_augment:
images = misc.apply_mirror_augment(images)
if images.shape[1] == 1:
images = np.tile(images, [1, 3, 1, 1]) # grayscale => RGB
[obj.feed(mode, images) for obj in metric_objs]
results = [obj.end(mode) for obj in metric_objs]
print('%-12s' % misc.format_time(time.time() - time_begin), end='')
for obj, vals in zip(metric_objs, results):
for val, fmt in zip(vals, obj.get_metric_formatting()):
print(fmt % val, end='')
print()
# Evaluate each network snapshot.
for snapshot_idx, snapshot_pkl in enumerate(reversed(snapshot_pkls)):
prefix = 'network-snapshot-'; postfix = '.pkl'
snapshot_name = os.path.basename(snapshot_pkl)
assert snapshot_name.startswith(prefix) and snapshot_name.endswith(postfix)
snapshot_kimg = int(snapshot_name[len(prefix) : -len(postfix)])
print('%-10d' % snapshot_kimg, end='')
        mode = 'fakes'
[obj.begin(mode) for obj in metric_objs]
time_begin = time.time()
with tf.Graph().as_default(), tfutil.create_session(config.tf_config).as_default():
G, D, Gs = misc.load_pkl(snapshot_pkl)
for begin in range(0, num_images, minibatch_size):
end = min(begin + minibatch_size, num_images)
latents = misc.random_latents(end - begin, Gs)
images = Gs.run(latents, labels[begin:end], num_gpus=config.num_gpus, out_mul=127.5, out_add=127.5, out_dtype=np.uint8)
if images.shape[1] == 1:
images = np.tile(images, [1, 3, 1, 1]) # grayscale => RGB
[obj.feed(mode, images) for obj in metric_objs]
results = [obj.end(mode) for obj in metric_objs]
print('%-12s' % misc.format_time(time.time() - time_begin), end='')
for obj, vals in zip(metric_objs, results):
for val, fmt in zip(vals, obj.get_metric_formatting()):
print(fmt % val, end='')
print()
print()
#----------------------------------------------------------------------------
| 31,455 | 47.768992 | 239 | py |
Beholder-GAN | Beholder-GAN-master/beauty_prediction/execute_beauty_prediction.py | from __future__ import print_function, division
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torchvision import transforms, models
from torch.autograd import Variable
import os
import numpy as np
from PIL import Image
import csv
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='experiments/train_beauty_vgg/VGG16_beauty_rates.pt', help='path to the trained VGG16 model')
parser.add_argument('--dataset', type=str, default='../datasets/CelebA-HQ', help='path to the dataset we want to label')
parser.add_argument('--beauty_rates', type=int, default=60, help='number of beauty rates/output neurons for the last layer')
parser.add_argument('--pad_x', type=int, default=0, help='pixels to pad the given images from left and right')
parser.add_argument('--pad_y', type=int, default=0, help='pixels to pad the given images from up and down')
opt = parser.parse_args()
print(opt)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
cudnn.benchmark = True
# VGG-16 Takes 224x224 images as input
transform=transforms.Compose([
transforms.Pad((opt.pad_x,opt.pad_y)),
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
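# Note: inputs are normalized to [-1, 1] with per-channel mean/std of 0.5,
# matching the preprocessing used when the model was fine-tuned in
# train_beauty_prediction.py (rather than the usual ImageNet statistics).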
# Load the pretrained model from pytorch
vgg16 = models.vgg16_bn(pretrained=True)
# Freeze training for all layers
for param in vgg16.features.parameters():
    param.requires_grad = False
# Newly created modules have requires_grad=True by default
num_features = vgg16.classifier[6].in_features
features = list(vgg16.classifier.children())[:-1] # Remove last layer
features.extend([nn.Linear(num_features, opt.beauty_rates)]) # Add our layer with opt.beauty_rates outputs
vgg16.classifier = nn.Sequential(*features) # Replace the model classifier
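# The resulting regression head (shapes for vgg16_bn): 25088 -> 4096 -> 4096 -> 60,
# i.e. one linear output per rater with no final activation; the targets it was
# trained against are the raw 1-5 rates divided by 5.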
# move model to gpu
if torch.cuda.device_count() > 1:
print("Running on", torch.cuda.device_count(), "GPUs.")
vgg16 = nn.DataParallel(vgg16)
else:
print("Running on CPU.")
vgg16.to(device)
# upload pretrained weights from beauty labeled dataset
vgg16.load_state_dict(torch.load(opt.model))
vgg16.eval()
# create beauty rates lists for each image in dataset
files = []
beauty_rates = []
images_dir = "{0}/img".format(opt.dataset)
number_of_images = len(os.listdir(images_dir))
for i, file in enumerate(sorted(os.listdir(images_dir))):
# open image, transform and upload to gpu
img = Image.open(os.path.join(images_dir,file))
img = transform(img)
img = torch.from_numpy(np.asarray(img))
    with torch.no_grad():
        img = Variable(img.to(device))
        img = torch.unsqueeze(img, 0)
        # infer image to receive beauty rates (no gradients are needed here,
        # so the whole forward pass runs under no_grad)
        output = vgg16(img)
# convert output tensor into list with rounded values
output_list = (output.data.cpu().numpy().tolist())[0]
output_list = [round(x,4) for x in output_list]
# add file and beauty rates to lists
files.append(file)
beauty_rates.append(output_list)
if (i % 100 == 0):
print('{0}/{1} images done'.format(i,number_of_images))
# convert lists to csv lines
csv_lines = []
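# Each row mirrors the All_Ratings.csv layout that faces_dataset.py parses:
# rater_index,filename,rating — with the predicted rates in [0,1] mapped back
# to the 1-5 rating scale via the *5.0 factor below.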
for i in range(0,opt.beauty_rates):
for j in range(0,number_of_images):
csv_lines.append('{0},{1},{2},'.format(str(i+1),files[j],str(beauty_rates[j][i]*5.0)))
# write csv lines to file
csv_path = "{0}/All_Ratings.csv".format(opt.dataset)
with open(csv_path, "wb") as csv_file:
for line in csv_lines:
csv_file.write(line)
csv_file.write('\n')
| 3,738 | 35.300971 | 142 | py |
Beholder-GAN | Beholder-GAN-master/beauty_prediction/train_beauty_prediction.py | from __future__ import print_function, division
import argparse
import os
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torchvision import transforms, models
from torch.autograd import Variable
import time
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from faces_dataset import FacesDataset
from torch.utils.data.sampler import SubsetRandomSampler
import copy
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default=None, help='Where to load the dataset from')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
parser.add_argument('--batchSize', type=int, default=32, help='input batch size')
parser.add_argument('--beauty_rates', type=int, default=60, help='number of beauty rates/output neurons for the last layer')
parser.add_argument('--niter', type=int, default=10, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=1e-4, help='learning rate, default=1e-4')
parser.add_argument('--experiment', default=None, help='Where to store samples and models')
opt = parser.parse_args()
print(opt)
# create experiments directory
if not os.path.exists(opt.experiment):
os.makedirs(opt.experiment)
# use cuda if available, cpu if not
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
cudnn.benchmark = True
# VGG-16 Takes 224x224 images as input
transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.RandomRotation(10),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
# load labeled beauty rates dataset
dataset = FacesDataset(opt.dataset, transform)
# split dataset to 80% train, 20% validation
train_split = .8
validation_split = .2
shuffle_dataset = True
random_seed= 42
# Creating data indices for training and validation splits:
dataset_size = len(dataset)
indices = list(range(dataset_size))
split_val = int(np.floor(validation_split * dataset_size))
if shuffle_dataset :
np.random.seed(random_seed)
np.random.shuffle(indices)
train_indices = indices[split_val:]
val_indices = indices[:split_val]
# Creating PT data samplers and loaders:
train_sampler = SubsetRandomSampler(train_indices)
validation_sampler = SubsetRandomSampler(val_indices)
# create data loaders for train and validation sets
train_loader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
sampler=train_sampler, num_workers=int(opt.workers))
validation_loader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
sampler=validation_sampler, num_workers=int(opt.workers))
# Load the pretrained model from pytorch
vgg16 = models.vgg16_bn(pretrained=True)
# Freeze training for all layers
for param in vgg16.features.parameters():
    param.requires_grad = False
# Newly created modules have requires_grad=True by default
num_features = vgg16.classifier[6].in_features
features = list(vgg16.classifier.children())[:-1] # Remove last layer
features.extend([nn.Linear(num_features, opt.beauty_rates)]) # Add our layer with opt.beauty_rates outputs and activation on it
vgg16.classifier = nn.Sequential(*features) # Replace the model classifier
# check if several GPUs exist and move model to gpu if available
if torch.cuda.device_count() > 1:
print("Running on", torch.cuda.device_count(), "GPUs.")
vgg16 = nn.DataParallel(vgg16)
else:
print("Running on single GPU.")
vgg16.to(device)
# define loss and optimization
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(vgg16.parameters(), lr=opt.lr)
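# MSE over the full 60-dimensional rate vector regresses every rater's
# (normalized) score jointly; downstream code reduces this to a scalar
# beauty score by taking the mean over raters.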
# function to train the model
def train_model(vgg, criterion, optimizer, num_epochs=10):
since = time.time()
best_model_wts = copy.deepcopy(vgg.state_dict())
min_loss = 9999.0
# save losses to plot graph
avg_loss = 0
avg_loss_val = 0
avg_loss_list = []
avg_loss_val_list = []
train_batches = len(train_loader)
val_batches = len(validation_loader)
    for epoch in range(num_epochs):
        print("Epoch {}/{}".format(epoch, num_epochs))
print('-' * 10)
loss_train = 0
loss_val = 0
# change model to training mode
vgg.train(True)
for i, data in enumerate(train_loader):
if i % 100 == 0:
print("\rTraining batch {}/{}".format(i, train_batches))
# get images and labels from data loader
images, beauty_rates, beauty_class = data
# move to gpu if available
            images, beauty_rates = Variable(images.to(device)), Variable(beauty_rates.to(device))
optimizer.zero_grad()
# infer images and compute loss
outputs = vgg(images)
beauty_rates = torch.squeeze(beauty_rates, 1)
loss = criterion(outputs, beauty_rates)
loss.backward()
optimizer.step()
# sum batches losses
            loss_train += loss.item()
# free memory
del images, beauty_class, beauty_rates, outputs
torch.cuda.empty_cache()
print()
        avg_loss = loss_train / train_batches  # mean of the per-batch losses
avg_loss_list.append(avg_loss)
vgg.train(False)
vgg.eval()
for i, data in enumerate(validation_loader):
if i % 100 == 0:
print("\rValidation batch {}/{}".format(i, val_batches))
# get images and labels from data loader
images, beauty_rates, beauty_class = data
# move to gpu if available
            with torch.no_grad():
                images, beauty_rates = Variable(images.to(device)), Variable(beauty_rates.to(device))
                # infer images and compute loss; the whole forward pass runs
                # under no_grad, so no autograd graph is kept during validation
                outputs = vgg(images)
                beauty_rates = torch.squeeze(beauty_rates, 1)
                loss = criterion(outputs, beauty_rates)
            # sum batches losses
            loss_val += loss.item()
# free memory
del images, beauty_rates, beauty_class, outputs
torch.cuda.empty_cache()
        avg_loss_val = loss_val / val_batches  # mean of the per-batch losses
avg_loss_val_list.append(avg_loss_val)
print()
print("Epoch {} result: ".format(epoch))
print("Avg loss (train): {:.4f}".format(avg_loss))
print("Avg loss (val): {:.4f}".format(avg_loss_val))
print('-' * 10)
print()
# save model state if validation loss improved
if avg_loss_val < min_loss:
min_loss = avg_loss_val
best_model_wts = copy.deepcopy(vgg.state_dict())
elapsed_time = time.time() - since
print()
print("Training completed in {:.0f}m {:.0f}s".format(elapsed_time // 60, elapsed_time % 60))
print("Minimal loss: {:.4f}".format(min_loss))
# plot graph
plt.clf()
plt.title('Beauty rates loss')
plt.xlabel('Epoch')
    plt.plot(range(num_epochs), avg_loss_list, label='Train loss')
    plt.plot(range(num_epochs), avg_loss_val_list, label='Validation loss')
plt.legend(['Train loss', 'Validation loss'], loc='upper right')
plt.savefig('{0}/beauty_rates_loss.png'.format(opt.experiment))
vgg.load_state_dict(best_model_wts)
return vgg
# train model and save final model
vgg16 = train_model(vgg16, criterion, optimizer, opt.niter)
torch.save(vgg16.state_dict(), '{0}/VGG16_beauty_rates.pt'.format(opt.experiment))
| 8,255 | 35.052402 | 127 | py |
Beholder-GAN | Beholder-GAN-master/beauty_prediction/execute_beauty_prediction_single.py | from __future__ import print_function, division
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torchvision import transforms, models
from torch.autograd import Variable
import os
import numpy as np
from PIL import Image
import csv
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='experiments/train_beauty_vgg/VGG16_beauty_rates.pt', help='path to the trained VGG16 model')
parser.add_argument('--image', type=str, default='sample.png', help='path to the input image to rate')
parser.add_argument('--beauty_rates', type=int, default=60, help='number of beauty rates/output neurons for the last layer')
parser.add_argument('--pad_x', type=int, default=0, help='pixels to pad the given images from left and right')
parser.add_argument('--pad_y', type=int, default=0, help='pixels to pad the given images from up and down')
opt = parser.parse_args()
print(opt)
# define cuda as device if available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
cudnn.benchmark = True
# VGG-16 Takes 224x224 images as input
transform=transforms.Compose([
transforms.Pad((opt.pad_x,opt.pad_y)),
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
# Load the pretrained model from pytorch
vgg16 = models.vgg16_bn(pretrained=True)
#print(vgg16.classifier[6].out_features) # 1000
# Freeze training for all layers
for param in vgg16.features.parameters():
    param.requires_grad = False
# Newly created modules have requires_grad=True by default
num_features = vgg16.classifier[6].in_features
features = list(vgg16.classifier.children())[:-1] # Remove last layer
features.extend([nn.Linear(num_features, opt.beauty_rates)]) # Add our layer with opt.beauty_rates outputs
vgg16.classifier = nn.Sequential(*features) # Replace the model classifier
# check if several GPUs exist and move model to gpu if available
if torch.cuda.device_count() > 1:
print("Running on", torch.cuda.device_count(), "GPUs.")
vgg16 = nn.DataParallel(vgg16)
else:
print("Running on single GPU.")
vgg16.to(device)
# upload pretrained weights from beauty labeled dataset
vgg16.load_state_dict(torch.load(opt.model))
vgg16.eval()
# open image, transform and upload to gpu
img = Image.open(opt.image)
img = transform(img)
img = torch.from_numpy(np.asarray(img))
with torch.no_grad():
    img = Variable(img.to(device))
    img = torch.unsqueeze(img, 0)
    # infer image to receive beauty rates (no gradients are needed here,
    # so the whole forward pass runs under no_grad)
    output = vgg16(img)
print("beauty_rates:")
print(output)
print("mean:")
print(output.mean())
| 2,908 | 34.47561 | 142 | py |
Beholder-GAN | Beholder-GAN-master/beauty_prediction/faces_dataset.py | import csv
import numpy as np
import torch
from torch.utils.data.dataset import Dataset
from PIL import Image
import matplotlib.pyplot as plt
##### Dataset for Face images with beauty rates #####
# Each entry will contain: #
# Face image #
# List of 60 beauty grades in the range of [1,5] #
raters_number = 60
class FacesDataset(Dataset):
    # per-instance storage:
    #   self.images       - list of image path strings
    #   self.beauty_rates - list of numpy arrays of 60 floats in [0,1]
    def __init__(self, folder_dataset, transform=None):
        # created here rather than as class attributes so that two dataset
        # instances do not share the same underlying lists
        self.images = []
        self.beauty_rates = []
        self.transform = transform
# Dictionary to load dataset
# key: image name
# value: list of 60 beauty rates from raters
dataset_dict = {}
# read raters csv file
with open(folder_dataset + '/All_Ratings.csv', 'r') as csvfile:
raw_dataset = csv.reader(csvfile, delimiter=',', quotechar='|')
for i, row in enumerate(raw_dataset):
row = ','.join(row)
row = row.split(',')
# create list of rates for each image
if row[1] in dataset_dict:
dataset_dict[row[1]][0].append(float(row[2]))
else:
dataset_dict[row[1]] = [[float(row[2])]]
        # move dict entries to lists; scale rates from the raw [1,5] range
        # to [0,1] by dividing by 5
for key, value in dataset_dict.items():
self.images.append(folder_dataset + '/img/' + key)
self.beauty_rates.append((np.asarray(value, dtype=np.float32) / 5.0))
# Override to give PyTorch access to any image on the dataset
def __getitem__(self, index):
img = Image.open(self.images[index])
#img = img.convert('RGB') #TODO: check if necessary
# perform transform only on the image (!!)
if self.transform is not None:
img = self.transform(img)
# Convert image and beauty rates to torch tensors
img = torch.from_numpy(np.asarray(img))
features = torch.from_numpy(np.asarray(self.beauty_rates[index]).reshape([1,raters_number]))
        # compute a discrete beauty class in [0,10]: the mean rate in [0,1]
        # is scaled by 10 and truncated to an integer
        features_class = (torch.mean(features) * 10.0).int()
return img, features, features_class
# Override to give PyTorch size of dataset
def __len__(self):
return len(self.images)
if __name__ == "__main__":
train_dataset = FacesDataset('../datasets/beauty_dataset')
# sample one image and beauty rates to test correlation
image, features, features_class = train_dataset.__getitem__(5)
print("beauty rates: "+ str(features))
print("beauty rate mean: "+ str(features.mean()))
print("beauty rate class: "+ str(features_class))
| 2,953 | 35.02439 | 100 | py |